[llvm] b0c2f90 - [RISCV] Merge more rv32/rv64 vector intrinsic tests that contain the same content.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sat Oct 8 18:38:31 PDT 2022


Author: Craig Topper
Date: 2022-10-08T18:30:40-07:00
New Revision: b0c2f9045394211c489c9e7777deef682e6701ba

URL: https://github.com/llvm/llvm-project/commit/b0c2f9045394211c489c9e7777deef682e6701ba
DIFF: https://github.com/llvm/llvm-project/commit/b0c2f9045394211c489c9e7777deef682e6701ba.diff

LOG: [RISCV] Merge more rv32/rv64 vector intrinsic tests that contain the same content.
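
The merged files use the same scheme as the previously merged rvv intrinsic
tests: the XLEN-dependent scalar type is spelled with the placeholder iXLen,
and the RUN lines pipe the test through sed to rewrite that placeholder to
i32 or i64 before invoking llc, so one copy of the IR exercises both
riscv32 and riscv64. A condensed excerpt of the merged vadc.ll (the full
RUN lines appear in the diff below):

    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

    ; The VL operand is XLEN-sized, so it is declared as iXLen and
    ; rewritten per target by the sed commands above.
    declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      <vscale x 1 x i1>,
      iXLen);

The separate RV32/RV64 prefixes are only needed where codegen actually
differs between the targets, for example the rv32 lowering of i64 scalar
operands through a stack slot and vlse64.v; tests whose output is identical
on both targets share the plain CHECK lines.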

Added: 
    llvm/test/CodeGen/RISCV/rvv/vadc.ll
    llvm/test/CodeGen/RISCV/rvv/vand.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu.ll
    llvm/test/CodeGen/RISCV/rvv/vleff.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd.ll
    llvm/test/CodeGen/RISCV/rvv/vmax.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu.ll
    llvm/test/CodeGen/RISCV/rvv/vmin.ll
    llvm/test/CodeGen/RISCV/rvv/vminu.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub.ll
    llvm/test/CodeGen/RISCV/rvv/vor.ll
    llvm/test/CodeGen/RISCV/rvv/vrem.ll
    llvm/test/CodeGen/RISCV/rvv/vremu.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
    llvm/test/CodeGen/RISCV/rvv/vxor.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
deleted file mode 100644
index d62e4901b421f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ /dev/null
@@ -1,1456 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    <vscale x 64 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 -9,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 9,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 -9,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 9,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 -9,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 9,
-    <vscale x 32 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 -9,
-    <vscale x 64 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 9,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 -9,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 9,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 -9,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 9,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 -9,
-    <vscale x 32 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 -9,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 -9,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 -9,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 -9,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vadc.ll
index bf6ce4e573f54..629426abb2ccc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -20,7 +22,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i8> %a
 }
@@ -30,9 +32,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -44,7 +46,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i8> %a
 }
@@ -54,9 +56,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -68,7 +70,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i8> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -92,7 +94,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i8> %a
 }
@@ -102,9 +104,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -116,7 +118,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i8> %a
 }
@@ -126,9 +128,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -140,7 +142,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i8> %a
 }
@@ -150,9 +152,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -164,7 +166,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i8> %a
 }
@@ -174,9 +176,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -188,7 +190,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
@@ -198,9 +200,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -212,7 +214,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
@@ -222,9 +224,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -236,7 +238,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -246,9 +248,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -260,7 +262,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
@@ -270,9 +272,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -284,7 +286,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
@@ -294,9 +296,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -308,7 +310,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
@@ -318,9 +320,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
@@ -342,9 +344,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -356,7 +358,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -366,9 +368,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -380,7 +382,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -404,7 +406,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
@@ -414,9 +416,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -428,7 +430,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
@@ -438,9 +440,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -452,7 +454,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -462,9 +464,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -476,7 +478,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
@@ -486,9 +488,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -500,7 +502,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
@@ -510,9 +512,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -524,7 +526,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i8> %a
 }
@@ -558,9 +560,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -572,7 +574,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i8> %a
 }
@@ -582,9 +584,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -596,7 +598,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i8> %a
 }
@@ -606,9 +608,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -620,7 +622,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i8> %a
 }
@@ -630,9 +632,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -644,7 +646,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i8> %a
 }
@@ -654,9 +656,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -668,7 +670,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i8> %a
 }
@@ -678,9 +680,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -692,7 +694,7 @@ entry:
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i8> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -716,7 +718,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
@@ -726,9 +728,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -740,7 +742,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
@@ -750,9 +752,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -764,7 +766,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -774,9 +776,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -788,7 +790,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
@@ -798,9 +800,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -812,7 +814,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
@@ -822,9 +824,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -836,7 +838,7 @@ entry:
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
@@ -846,9 +848,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -860,7 +862,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
@@ -870,9 +872,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -884,7 +886,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -894,9 +896,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -908,7 +910,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
@@ -918,9 +920,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -932,7 +934,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
@@ -942,9 +944,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
@@ -966,21 +968,33 @@ declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vadc.vvm v8, v8, v9, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vadc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -990,21 +1004,33 @@ declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vadc.vvm v8, v8, v10, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vadc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1014,21 +1040,33 @@ declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vadc.vvm v8, v8, v12, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vadc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1038,247 +1076,259 @@ declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vadc.vvm v8, v8, v16, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vadc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 32 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 16 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
+; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -1290,12 +1340,12 @@ entry:
     <vscale x 1 x i32> %0,
     i32 -9,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -1307,12 +1357,12 @@ entry:
     <vscale x 2 x i32> %0,
     i32 9,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -1324,12 +1374,12 @@ entry:
     <vscale x 4 x i32> %0,
     i32 -9,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -1341,12 +1391,12 @@ entry:
     <vscale x 8 x i32> %0,
     i32 9,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -1358,12 +1408,12 @@ entry:
     <vscale x 16 x i32> %0,
     i32 -9,
     <vscale x 16 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -1375,12 +1425,12 @@ entry:
     <vscale x 1 x i64> %0,
     i64 9,
     <vscale x 1 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -1392,12 +1442,12 @@ entry:
     <vscale x 2 x i64> %0,
     i64 -9,
     <vscale x 2 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -1409,12 +1459,12 @@ entry:
     <vscale x 4 x i64> %0,
     i64 9,
     <vscale x 4 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1426,7 +1476,7 @@ entry:
     <vscale x 8 x i64> %0,
     i64 -9,
     <vscale x 8 x i1> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
deleted file mode 100644
index c20886795dbb9..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ /dev/null
@@ -1,2800 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 9,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vand.ll
index f09efe0324b56..8bbb0092b7edb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vand.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vand.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vand.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vand.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vand.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vand.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vand.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vand.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,33 +2139,39 @@ declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vand.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vand.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2132,12 +2182,12 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2149,12 +2199,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2165,12 +2215,12 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2182,12 +2232,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2198,12 +2248,12 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2215,12 +2265,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2231,12 +2281,12 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2248,12 +2298,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2264,12 +2314,12 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2281,12 +2331,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2297,12 +2347,12 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2314,12 +2364,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2330,12 +2380,12 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2347,12 +2397,12 @@ entry:
     <vscale x 64 x i8> %1,
     i8 9,
     <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2363,12 +2413,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2380,12 +2430,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2396,12 +2446,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2413,12 +2463,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2429,12 +2479,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2446,12 +2496,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2462,12 +2512,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2479,12 +2529,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2495,12 +2545,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2512,12 +2562,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2528,12 +2578,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2545,12 +2595,12 @@ entry:
     <vscale x 32 x i16> %1,
     i16 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2561,12 +2611,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2578,12 +2628,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2594,12 +2644,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2611,12 +2661,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2627,12 +2677,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2644,12 +2694,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2660,12 +2710,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2677,12 +2727,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2693,12 +2743,12 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2710,12 +2760,12 @@ entry:
     <vscale x 16 x i32> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2726,12 +2776,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2743,12 +2793,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2759,12 +2809,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2776,12 +2826,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2792,12 +2842,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2809,12 +2859,12 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2825,12 +2875,12 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2842,7 +2892,7 @@ entry:
     <vscale x 8 x i64> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

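For readers wondering how a single merged file such as vand.ll can cover both targets: the iXLen placeholder seen in the hunks above is resolved at test time by the RUN lines, which conventionally substitute it per triple before invoking llc. A minimal sketch of that convention (illustrative of the usual merged-test pattern, not quoted from this commit's files):

; (sketch) the shared test body uses iXLen; each RUN pins it to that target's XLEN
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

With that substitution, one copy of each intrinsic test exercises the same CHECK lines on rv32 and rv64, which is what makes deleting the per-target duplicates below safe.
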
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
deleted file mode 100644
index 12dd21c279be9..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ /dev/null
@@ -1,2074 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

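The rename below carries the mechanism that makes the merge work: the test body spells every XLen-sized type as the placeholder iXLen, and each RUN line pipes the file through sed to materialize that placeholder as i32 or i64 before llc ever parses it. A minimal sketch of the expansion, assuming a POSIX sed and a hypothetical one-line input:

  $ echo 'declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)' \
      | sed 's/iXLen/i32/g'
  declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8, i32)

Lines that come out identical on both targets check against the shared CHECK prefix; anything that diverges between the two can use the RV32/RV64 prefixes added to the FileCheck invocations below.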
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vdiv.ll
index d17334671f290..989ceca6c6b68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
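For orientation while reading the masked hunks that follow: the trailing pair of iXLen operands on the .mask intrinsics is the vector length followed by the policy immediate, and the constant 1 used throughout selects the tail-agnostic, mask-undisturbed policy, which is why the masked checks emit vsetvli with `ta, mu` while the unmasked form (whose passthru is undef) gets `ta, ma`. A hypothetical annotated call, operand names invented for illustration:

;   %r = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
;     <vscale x 1 x i8> %passthru,   ; merge source for masked-off lanes
;     <vscale x 1 x i8> %lhs,        ; dividend
;     <vscale x 1 x i8> %rhs,        ; divisor
;     <vscale x 1 x i1> %mask,       ; mask, materialized in v0
;     iXLen %vl,                     ; active vector length
;     iXLen 1)                       ; policy: tail agnostic, mask undisturbed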
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vdiv.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vdiv.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vdiv.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vdiv.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vdiv.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vdiv.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vdiv.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,28 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vdiv.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vdiv.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

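For context on the hunks above: iXLen is not a first-class IR type but a textual placeholder, and merged tests of this kind are driven by RUN lines that specialize it per target before handing the file to llc. A minimal sketch of the header such a merged vdiv.ll would carry follows; the triple/attr flags mirror the RUN lines of the deleted per-target files below, but the exact sed invocation and prefix names here are an assumption rather than a quote of the committed file:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Output that matches on both targets keeps the shared CHECK prefix, while the i64 scalar-operand cases split into RV32-/RV64-prefixed blocks because only riscv32 needs the stack-splat sequence (sw/sw + vlse64.v) in place of a direct vdiv.vx.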
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
deleted file mode 100644
index a93f01e001b1a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
+++ /dev/null
@@ -1,2074 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vdivu.ll
index 5d8cc72e4e88a..236e35be4f8d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vdivu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vdivu.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vdivu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vdivu.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vdivu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vdivu.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vdivu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,28 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vdivu.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vdivu.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
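
The merged replacement files are not expanded in this mail, but the hunks above show the mechanism that lets one test body serve both targets: every scalar of XLEN width is spelled with the `iXLen` placeholder, and each RUN line rewrites that placeholder with sed before the IR reaches llc. A minimal sketch of the RUN lines such a merged file carries (the exact `-mattr` string varies per file and is an assumption here):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Where the two targets generate identical code, update_llc_test_checks.py keeps the assertions under the shared CHECK prefix. Where they diverge, as in the i64 element cases above (rv32 must splat the 64-bit scalar through a stack slot with vlse64.v, while rv64 can use vdivu.vx directly), it emits separate RV32 and RV64 blocks.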

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
deleted file mode 100644
index c13eeb85b7f5c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
+++ /dev/null
@@ -1,2029 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x double>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x double>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.nxv2f64(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x double>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x double>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.nxv4f64(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x double>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x double>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.nxv8f64(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x double>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x double>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x double>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i32>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i32>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i32>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i32>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i32>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i32>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i32>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i32>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 16 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i32>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 16 x i32>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i32>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.nxv1f32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x float>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x float>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.nxv2f32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x float>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x float>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.nxv4f32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x float>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x float>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.nxv8f32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x float>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x float>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.nxv16f32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 16 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x float>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 16 x float>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x float>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 16 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 16 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 32 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x i16>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 32 x i16>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x i16>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.nxv1f16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x half>, i32 } @llvm.riscv.vleff.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.nxv2f16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x half>, i32 } @llvm.riscv.vleff.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.nxv4f16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x half>, i32 } @llvm.riscv.vleff.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.nxv8f16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x half>, i32 } @llvm.riscv.vleff.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.nxv16f16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 16 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x half>, i32 } @llvm.riscv.vleff.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 16 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.nxv32f16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 32 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x half>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x half>, i32 } @llvm.riscv.vleff.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 32 x half>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x half>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 1 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 2 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 2 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 2 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 4 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 4 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 4 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 8 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 8 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 8 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 16 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 16 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 16 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 32 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 32 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 32 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 64 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 64 x i8>, i32 } %a, 1
-  store i32 %c, i32* %2
-  ret <vscale x 64 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 64 x i8>, i32 } %a, 0
-  %c = extractvalue { <vscale x 64 x i8>, i32 } %a, 1
-  store i32 %c, i32* %4
-
-  ret <vscale x 64 x i8> %b
-}
-
-; Test with the VL output unused
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_vl:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
-  ret <vscale x 1 x double> %b
-}
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 0
-
-  ret <vscale x 1 x double> %b
-}
-
-; Test with the loaded value unused
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 1
-  store i32 %b, i32* %2
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-  %b = extractvalue { <vscale x 1 x double>, i32 } %a, 1
-  store i32 %b, i32* %4
-
-  ret void
-}
-
-; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i32 %1, i32* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1)
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret void
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
deleted file mode 100644
index 3eefd9a52cb1b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
+++ /dev/null
@@ -1,2029 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i64>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x i64>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i64>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x i64> %b
-}
-
-declare { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x double> %b
-}
-
-declare { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x double>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x double>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x double> %b
-}
-
-declare { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x double>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x double>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x double> %b
-}
-
-declare { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x double>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x double>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x double>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x double> %b
-}
-
-declare { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i32>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i32>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i32>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i32>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i32>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i32>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i32>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i32>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 16 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i32>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 16 x i32>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i32>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 16 x i32> %b
-}
-
-declare { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x float>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x float>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x float> %b
-}
-
-declare { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x float>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x float>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x float> %b
-}
-
-declare { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x float>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x float>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x float> %b
-}
-
-declare { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x float>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x float>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x float> %b
-}
-
-declare { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vle32ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 16 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x float>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 16 x float>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x float>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 16 x float> %b
-}
-
-declare { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 16 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 16 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 16 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 32 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x i16>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 32 x i16>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x i16>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 32 x i16> %b
-}
-
-declare { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x half> %b
-}
-
-declare { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x half> %b
-}
-
-declare { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x half> %b
-}
-
-declare { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x half> %b
-}
-
-declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 16 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 16 x half> %b
-}
-
-declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vle16ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 32 x half>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x half>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 32 x half> %b
-}
-
-declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 1 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 2 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 2 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 2 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 4 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 4 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 4 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 8 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 8 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 8 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 16 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 16 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 16 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 32 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 32 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 32 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vle8ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
-  store i64 %c, i64* %2
-  ret <vscale x 64 x i8> %b
-}
-
-declare { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vle8ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 64 x i8>, i64 } %a, 0
-  %c = extractvalue { <vscale x 64 x i8>, i64 } %a, 1
-  store i64 %c, i64* %4
-
-  ret <vscale x 64 x i8> %b
-}
-
-; Test with the VL output unused
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_vl:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-  ret <vscale x 1 x double> %b
-}
-
-define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 0
-
-  ret <vscale x 1 x double> %b
-}
-
-; Test with the loaded value unused
-define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, i64 %1, i64* %2) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %b, i64* %2
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3, i64* %4) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_value:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    csrr a0, vl
-; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-  %b = extractvalue { <vscale x 1 x double>, i64 } %a, 1
-  store i64 %b, i64* %4
-
-  ret void
-}
-
-; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vleff_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vle64ff.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-  ret void
-}
-
-define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret void
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
new file mode 100644
index 0000000000000..5db8417a77054
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
@@ -0,0 +1,2639 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
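+; Each RUN line above pipes this file through sed to rewrite the iXLen
+; placeholder into the target's native XLEN type (i32 for riscv32, i64 for
+; riscv64), so one source file now covers both targets; in the checks below
+; only the store of the VL output differs (sw on RV32, sd on RV64).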
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x i64> %b
+}
+
+declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i64> %b
+}
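+
+; The trailing "iXLen 1" argument of the masked intrinsics is the policy
+; operand; a value of 1 selects tail-agnostic, mask-undisturbed, which is why
+; the masked checks use "ta, mu" while the unmasked ones use "ta, ma".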
+
+declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x i64> %b
+}
+
+declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x i64> %b
+}
+
+declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x i64> %b
+}
+
+declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x i64> %b
+}
+
+declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x i64> %b
+}
+
+declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x i64> %b
+}
+
+declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  iXLen);
+
+define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x double> %b
+}
+
+declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x double> %b
+}
+
+declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  iXLen);
+
+define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
+    <vscale x 2 x double> undef,
+    <vscale x 2 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x double> %b
+}
+
+declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x double> %b
+}
+
+declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  iXLen);
+
+define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
+    <vscale x 4 x double> undef,
+    <vscale x 4 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x double> %b
+}
+
+declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x double> %b
+}
+
+declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  iXLen);
+
+define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
+    <vscale x 8 x double> undef,
+    <vscale x 8 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x double> %b
+}
+
+declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x double> %b
+}
+
+declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x i32> %b
+}
+
+declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i32> %b
+}
+
+declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x i32> %b
+}
+
+declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x i32> %b
+}
+
+declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x i32> %b
+}
+
+declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x i32> %b
+}
+
+declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x i32> %b
+}
+
+declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x i32> %b
+}
+
+declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x i32> %b
+}
+
+declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x i32> %b
+}
+
+declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  iXLen);
+
+define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x float>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x float> %b
+}
+
+declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x float> %b
+}
+
+declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  iXLen);
+
+define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
+    <vscale x 2 x float> undef,
+    <vscale x 2 x float>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x float> %b
+}
+
+declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x float> %b
+}
+
+declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  iXLen);
+
+define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
+    <vscale x 4 x float> undef,
+    <vscale x 4 x float>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x float> %b
+}
+
+declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x float> %b
+}
+
+declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  iXLen);
+
+define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
+    <vscale x 8 x float> undef,
+    <vscale x 8 x float>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x float> %b
+}
+
+declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x float> %b
+}
+
+declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  iXLen);
+
+define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    vle32ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV64-NEXT:    vle32ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
+    <vscale x 16 x float> undef,
+    <vscale x 16 x float>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x float> %b
+}
+
+declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x float> %b
+}
+
+declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
+    <vscale x 1 x i16> undef,
+    <vscale x 1 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x i16> %b
+}
+
+declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i16> %b
+}
+
+declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x i16> %b
+}
+
+declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x i16> %b
+}
+
+declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x i16> %b
+}
+
+declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x i16> %b
+}
+
+declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x i16> %b
+}
+
+declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x i16> %b
+}
+
+declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x i16> %b
+}
+
+declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x i16> %b
+}
+
+declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 32 x i16> %b
+}
+
+declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 32 x i16> %b
+}
+
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  iXLen);
+
+define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(<vscale x 1 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+    <vscale x 1 x half> undef,
+    <vscale x 1 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x half> %b
+}
+
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x half> %b
+}
+
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  iXLen);
+
+define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(<vscale x 2 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+    <vscale x 2 x half> undef,
+    <vscale x 2 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x half> %b
+}
+
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x half> %b
+}
+
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  iXLen);
+
+define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(<vscale x 4 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+    <vscale x 4 x half> undef,
+    <vscale x 4 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x half> %b
+}
+
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x half> %b
+}
+
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  iXLen);
+
+define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(<vscale x 8 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+    <vscale x 8 x half> undef,
+    <vscale x 8 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x half> %b
+}
+
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x half> %b
+}
+
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  iXLen);
+
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(<vscale x 16 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+    <vscale x 16 x half> undef,
+    <vscale x 16 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x half> %b
+}
+
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x half> %b
+}
+
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  iXLen);
+
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(<vscale x 32 x half>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+    <vscale x 32 x half> undef,
+    <vscale x 32 x half>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 32 x half> %b
+}
+
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 32 x half> %b
+}
+
+declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x i8> %b
+}
+
+declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x i8> %b
+}
+
+declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x i8> %b
+}
+
+declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x i8> %b
+}
+
+declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x i8> %b
+}
+
+declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x i8> %b
+}
+
+declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x i8> %b
+}
+
+declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x i8> %b
+}
+
+declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x i8> %b
+}
+
+declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x i8> %b
+}
+
+declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 32 x i8> %b
+}
+
+declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 32 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vle8ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vle8ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
+    <vscale x 64 x i8> undef,
+    <vscale x 64 x i8>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 64 x i8> %b
+}
+
+declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 64 x i8> %b
+}
+
+; Test with the VL output unused
+define <vscale x 1 x double> @intrinsic_vleff_dead_vl(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+  ret <vscale x 1 x double> %b
+}
+
+define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
+
+  ret <vscale x 1 x double> %b
+}
+
+; Test with the loaded value unused
+define void @intrinsic_vleff_dead_value(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_dead_value:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV32-NEXT:    vle64ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_dead_value:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vle64ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %b, iXLen* %2
+  ret void
+}
+
+define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_dead_value:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_dead_value:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
+  store iXLen %b, iXLen* %4
+
+  ret void
+}
+
+; Test with both outputs dead. Make sure the vleff isn't deleted.
+define void @intrinsic_vleff_dead_all(<vscale x 1 x double>* %0, iXLen %1, iXLen* %2) nounwind {
+; CHECK-LABEL: intrinsic_vleff_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64ff.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double>* %0,
+    iXLen %1)
+  ret void
+}
+
+define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret void
+}

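[Editorial context, not part of the patch: the vleff tests above exercise the
fault-only-first load, which trims vl rather than trapping when an element past
the first would fault; each test reads the trimmed length back via "csrr vl"
and stores it with sw (RV32) or sd (RV64). A minimal C sketch of how this
pattern is typically consumed, assuming the standard __riscv_-prefixed RVV C
intrinsics from <riscv_vector.h> — none of these names come from this commit:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

size_t strlen_ff(const char *s) {
  size_t total = 0;
  for (;;) {
    size_t vl;  /* receives the trimmed VL, i.e. the "csrr a0, vl" above */
    vuint8m1_t v =
        __riscv_vle8ff_v_u8m1((const uint8_t *)s + total, &vl,
                              __riscv_vsetvlmax_e8m1());
    vbool8_t is_nul = __riscv_vmseq_vx_u8m1_b8(v, 0, vl);
    long idx = __riscv_vfirst_m_b8(is_nul, vl);
    if (idx >= 0)
      return total + (size_t)idx;  /* NUL found within the loaded elements */
    total += vl;  /* vl can shrink if a later element would have faulted */
  }
}

The dead_vl/dead_value/dead_all tests above check that the load is kept even
when one or both results are unused, since vle*ff still writes vl and may
fault on the first element.]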
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
deleted file mode 100644
index 0b7dc24570908..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ /dev/null
@@ -1,1772 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32, i32);
-
-define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  i32,
-  i32
-);
-
-define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  i32,
-  i32
-);
-
-define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  i32,
-  i32
-);
-
-define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v10, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v10, v9, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v12, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  i32,
-  i32
-);
-
-define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT:    vmacc.vv v8, v16, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT:    vmacc.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}

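[The merged vmacc.ll below, like the other merged files in this commit, relies on lit's sed substitution: the test is written against a placeholder type iXLen, and each RUN line rewrites iXLen to the concrete XLEN type (i32 for riscv32, i64 for riscv64) before llc ever parses the IR. Codegen that is identical on both targets keeps shared CHECK lines; only XLEN-dependent sequences, such as the i64 scalar splats through the stack on rv32 seen above, need separate RV32/RV64 prefixes. A minimal self-contained sketch of the pattern follows; it is illustrative only, not part of this commit, and uses a hypothetical vadd test assuming the usual (passthru, op1, op2, vl) unmasked intrinsic signature used throughout these files.]

; NOTE: Illustrative sketch only -- not part of this commit.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (undef below, so the policy is "ta, ma")
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen)               ; vl operand: becomes i32 or i64 after sed

define <vscale x 1 x i8> @sketch_vadd(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) nounwind {
; CHECK-LABEL: sketch_vadd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    iXLen %vl)
  ret <vscale x 1 x i8> %r
}

[Because both RUN lines pipe the transformed file into llc while FileCheck still reads the original %s, the CHECK comments are unaffected by the substitution; vmacc.ll's divergent i64 vx cases add RV32/RV64 prefixes on top of this shared-CHECK baseline.]
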
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmacc.ll
index a41f679eebde4..d1151996e7ca3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -748,7 +750,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -795,7 +797,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -842,7 +844,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1415,10 +1417,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1429,7 +1431,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1439,9 +1441,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1453,7 +1455,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1462,10 +1464,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1476,7 +1478,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1486,9 +1488,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1500,7 +1502,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1509,10 +1511,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1533,9 +1535,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1547,7 +1549,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1556,21 +1558,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64>  @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT:    vmacc.vv v8, v10, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vmacc.vx v8, a0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1580,21 +1595,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT:    vmacc.vv v8, v10, v9, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT:    vmacc.vx v8, a0, v9, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1603,21 +1631,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64>  @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT:    vmacc.vv v8, v12, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vmacc.vx v8, a0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1627,21 +1668,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT:    vmacc.vv v8, v12, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT:    vmacc.vx v8, a0, v10, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1650,21 +1704,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64>  @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT:    vmacc.vv v8, v16, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vmacc.vx v8, a0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1674,21 +1741,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT:    vmacc.vv v8, v16, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT:    vmacc.vx v8, a0, v12, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
deleted file mode 100644
index 2aa86610307ea..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ /dev/null
@@ -1,1236 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  i8,
-  i32);
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  i16,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 -9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 -9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 -9,
-    i32 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 64 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 -9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 -9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 -9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 -9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 -9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 -9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 -9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 -9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
deleted file mode 100644
index 76800ea34380b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
+++ /dev/null
@@ -1,1388 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 9,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 9,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 9,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vim v9, v8, 9, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vim v10, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vim v12, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vim v16, v8, 9, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 9,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll
index b939b117a82f5..c24a9ba5ab8bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -42,7 +44,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -88,7 +90,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -111,7 +113,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -134,7 +136,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -157,7 +159,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i1> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -180,7 +182,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -203,7 +205,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -226,7 +228,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -249,7 +251,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -272,7 +274,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -295,7 +297,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -341,7 +343,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -387,7 +389,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -410,7 +412,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -433,7 +435,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -456,7 +458,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -479,7 +481,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -502,7 +504,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -525,7 +527,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -571,7 +573,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -594,7 +596,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -617,7 +619,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -640,7 +642,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -663,7 +665,7 @@ entry:
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i1> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -686,7 +688,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -709,7 +711,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -732,7 +734,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -755,7 +757,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -778,7 +780,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -801,7 +803,7 @@ entry:
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -824,7 +826,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -847,7 +849,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -870,7 +872,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -893,7 +895,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -916,7 +918,7 @@ entry:
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -925,27 +927,34 @@ declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmadc.vvm v9, v8, v10, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmadc.vvm v9, v8, v10, v0
+; RV32-NEXT:    vmv.v.v v0, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmadc.vxm v9, v8, a0, v0
+; RV64-NEXT:    vmv.v.v v0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -954,27 +963,34 @@ declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmadc.vvm v10, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmadc.vvm v10, v8, v12, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmadc.vxm v10, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -983,27 +999,34 @@ declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmadc.vvm v12, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmadc.vvm v12, v8, v16, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmadc.vxm v12, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1012,32 +1035,39 @@ declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmadc.vvm v16, v8, v24, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vmadc.vvm v16, v8, v24, v0
+; RV32-NEXT:    vmv1r.v v0, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmadc.vxm v16, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v16
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1049,12 +1079,12 @@ entry:
     <vscale x 1 x i8> %0,
     i8 9,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1066,12 +1096,12 @@ entry:
     <vscale x 2 x i8> %0,
     i8 9,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1083,12 +1113,12 @@ entry:
     <vscale x 4 x i8> %0,
     i8 9,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1100,12 +1130,12 @@ entry:
     <vscale x 8 x i8> %0,
     i8 9,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1117,12 +1147,12 @@ entry:
     <vscale x 16 x i8> %0,
     i8 9,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -1134,12 +1164,12 @@ entry:
     <vscale x 32 x i8> %0,
     i8 9,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -1151,12 +1181,12 @@ entry:
     <vscale x 64 x i8> %0,
     i8 9,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -1168,12 +1198,12 @@ entry:
     <vscale x 1 x i16> %0,
     i16 9,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -1185,12 +1215,12 @@ entry:
     <vscale x 2 x i16> %0,
     i16 9,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -1202,12 +1232,12 @@ entry:
     <vscale x 4 x i16> %0,
     i16 9,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -1219,12 +1249,12 @@ entry:
     <vscale x 8 x i16> %0,
     i16 9,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -1236,12 +1266,12 @@ entry:
     <vscale x 16 x i16> %0,
     i16 9,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -1253,12 +1283,12 @@ entry:
     <vscale x 32 x i16> %0,
     i16 9,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -1270,12 +1300,12 @@ entry:
     <vscale x 1 x i32> %0,
     i32 9,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -1287,12 +1317,12 @@ entry:
     <vscale x 2 x i32> %0,
     i32 9,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -1304,12 +1334,12 @@ entry:
     <vscale x 4 x i32> %0,
     i32 9,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -1321,12 +1351,12 @@ entry:
     <vscale x 8 x i32> %0,
     i32 9,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -1338,12 +1368,12 @@ entry:
     <vscale x 16 x i32> %0,
     i32 9,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -1355,12 +1385,12 @@ entry:
     <vscale x 1 x i64> %0,
     i64 9,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -1372,12 +1402,12 @@ entry:
     <vscale x 2 x i64> %0,
     i64 9,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -1389,12 +1419,12 @@ entry:
     <vscale x 4 x i64> %0,
     i64 9,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1406,7 +1436,7 @@ entry:
     <vscale x 8 x i64> %0,
     i64 9,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
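
Note on the merged RUN lines: rather than keeping separate -rv32/-rv64 copies, the merged tests spell every XLEN-sized operand as the placeholder type iXLen, and each RUN line rewrites the placeholder with sed before piping the file to llc. Roughly, lit expands the two RUN lines into pipelines of this shape (the vmadc.ll path here is illustrative):

  sed 's/iXLen/i32/g' vmadc.ll | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
    | FileCheck vmadc.ll --check-prefixes=CHECK,RV32
  sed 's/iXLen/i64/g' vmadc.ll | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
    | FileCheck vmadc.ll --check-prefixes=CHECK,RV64

Assembly that is identical on both targets is matched under the shared CHECK prefix; codegen that diverges, such as the i64-scalar vxm cases above, where rv32 has to splat the 64-bit scalar through the stack with vlse64.v while rv64 can use vmadc.vxm on the GPR directly, is matched under the target-specific RV32/RV64 prefixes.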

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll
similarity index 79%
rename from llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmadc.ll
index a13cc8ab41af7..bfa4ce38e3d68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }
@@ -144,9 +146,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -156,7 +158,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -164,9 +166,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -176,7 +178,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -204,9 +206,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -216,7 +218,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -224,9 +226,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -236,7 +238,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -244,9 +246,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -256,7 +258,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -284,9 +286,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -296,7 +298,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -304,9 +306,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -316,7 +318,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -324,9 +326,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -336,7 +338,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -344,9 +346,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -356,7 +358,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -364,9 +366,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -376,7 +378,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -384,9 +386,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -396,7 +398,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -404,9 +406,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -416,7 +418,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -424,9 +426,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -436,7 +438,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -444,9 +446,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -456,7 +458,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -476,7 +478,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -484,9 +486,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -496,7 +498,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -504,9 +506,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -516,7 +518,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -544,9 +546,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -556,7 +558,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -564,9 +566,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -576,7 +578,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }
@@ -584,9 +586,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -596,7 +598,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -604,9 +606,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -616,7 +618,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -624,9 +626,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -636,7 +638,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -644,9 +646,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -656,7 +658,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -664,9 +666,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -676,7 +678,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -684,9 +686,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -696,7 +698,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -704,9 +706,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -716,7 +718,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -724,9 +726,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -736,7 +738,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -744,9 +746,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -756,7 +758,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -764,9 +766,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -776,7 +778,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -804,19 +806,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmadc.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmadc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -824,19 +838,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmadc.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmadc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -844,19 +870,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmadc.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmadc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -864,24 +902,36 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmadc.vv v0, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmadc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -891,12 +941,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -906,12 +956,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -921,12 +971,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -936,12 +986,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -951,12 +1001,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -966,12 +1016,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -981,12 +1031,12 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -996,12 +1046,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -1011,12 +1061,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -1026,12 +1076,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -1041,12 +1091,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -1056,12 +1106,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -1071,12 +1121,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -1086,12 +1136,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -1101,12 +1151,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -1116,12 +1166,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -1131,12 +1181,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -1146,12 +1196,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -1161,12 +1211,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -1176,12 +1226,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -1191,12 +1241,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1206,7 +1256,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 -9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }

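For context on the merged file above: the new vmadc.ll keeps a single copy of each test by writing the VL operand as an iXLen placeholder and letting the RUN lines substitute the concrete XLen type per target, with the RV32/RV64 check prefixes covering the few cases (the vx forms with i64 scalars) where codegen differs between the two targets. The header of vmadc.ll is not shown in this excerpt, so the RUN lines below are a representative sketch of that mechanism rather than a quote of the file; the triples, attributes, and prefixes are assumptions inferred from the CHECK/RV32/RV64 labels visible in the hunks:

; A minimal sketch, assuming the usual sed-based iXLen substitution used by
; merged rv32/rv64 tests. Each RUN line rewrites iXLen to that target's XLen
; type before handing the file to llc, then matches the shared CHECK lines
; plus the per-target RV32/RV64 lines.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
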
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
deleted file mode 100644
index d672e2768e661..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
+++ /dev/null
@@ -1,1694 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}

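The merged file below uses the pattern shared by all of the tests in this commit: the XLEN-sized scalar operands (here the vl and tail-policy arguments) are spelled with a placeholder type iXLen, and the RUN lines pipe the file through sed to instantiate it as i32 for riscv32 and i64 for riscv64 before llc runs, so a single file replaces the old rv32/rv64 pair. As a sketch, running `sed 's/iXLen/i64/g'` over the merged test's first declaration reproduces exactly the form just removed from vmadd-rv64.ll above:

; Result of sed 's/iXLen/i64/g' on the first declaration of vmadd.ll;
; identical to the corresponding declaration in the deleted vmadd-rv64.ll.
declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64,
  i64);

Substituting via sed rather than duplicating files keeps the CHECK lines shared between targets, while the RV32/RV64 check prefixes added in the RUN lines remain available for the few cases where the two targets diverge.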
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmadd.ll
index 547b42243e2a0..8eed06064bc6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -748,7 +750,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -795,7 +797,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -842,7 +844,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1415,10 +1417,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1429,7 +1431,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1439,9 +1441,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1453,7 +1455,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1462,10 +1464,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1476,7 +1478,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1486,9 +1488,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1500,7 +1502,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1509,10 +1511,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1533,9 +1535,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1547,7 +1549,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1556,28 +1558,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v10, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64>  @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT:    vmadd.vv v8, v10, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vmadd.vx v8, a0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1587,28 +1595,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v10, v9, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT:    vmadd.vv v8, v10, v9, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT:    vmadd.vx v8, a0, v9, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1617,28 +1631,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v12, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64>  @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT:    vmadd.vv v8, v12, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vmadd.vx v8, a0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1648,28 +1668,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT:    vmadd.vv v8, v12, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT:    vmadd.vx v8, a0, v10, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1678,28 +1704,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT:    vmadd.vv v8, v16, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64>  @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT:    vmadd.vv v8, v16, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vmadd.vx v8, a0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1709,28 +1741,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT:    vmadd.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT:    vmadd.vv v8, v16, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT:    vmadd.vx v8, a0, v12, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
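
(Editorial note on the mechanism above: the merged vmadd.ll drives both targets from this single file by textually specializing the iXLen placeholder per triple. The file header is outside these hunks, so the RUN lines below are a sketch of the assumed form used for these rv32/rv64 merges, and the exact check-prefix names are an assumption inferred from the RV32/RV64 labels in the hunks above.)

; Assumed RUN header for the merged test; not shown in this diff.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Tests whose codegen is identical on both targets keep the shared CHECK prefix; only the i64 scalar-operand cases split into RV32/RV64 prefixes, because riscv32 must materialize the 64-bit scalar through the stack (two sw stores followed by a zero-strided vlse64.v splat feeding vmadd.vv), while riscv64 can pass it in a single GPR and use vmadd.vx directly.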

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
deleted file mode 100644
index abb8abf274973..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ /dev/null
@@ -1,2122 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

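(Note on the mechanism, since the rename below is where it first appears in
this message: the merged test is written once with an iXLen placeholder, and
each RUN line pipes the file through sed to materialize i32 or i64 before llc
parses it, so one source file covers both targets. A minimal sketch of the
shared pattern follows; the @example function is hypothetical and only
illustrates the placeholder, while the RUN lines match those added in the
diff.)

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; iXLen is not a real IR type; sed rewrites it to the target's XLEN type
; before llc ever parses the file.
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @example(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,   ; undef passthru: unmasked, tail-agnostic form
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)                  ; AVL (active vector length) operand
  ret <vscale x 1 x i8> %a
}
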
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmax.ll
index 598219805ebc8..6fd448cdae6c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,20 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmax.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1912,22 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmax.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1936,20 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmax.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1959,22 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmax.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1983,20 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmax.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2006,22 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmax.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2030,20 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmax.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2053,22 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vmax.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vmax.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

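The merged vmax.ll above folds the old rv32/rv64 copies into one file by spelling every XLEN-sized scalar operand as the placeholder type iXLen and splitting only the checks that genuinely differ into RV32/RV64 prefixes. The RUN lines of the merged file fall outside this hunk; a sketch of the pattern these merged rvv tests typically use (the exact flags and line breaks in the committed file may differ) is:

; sed materializes iXLen as the target's native XLEN type before llc
; runs, so a single IR file drives both triples.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Functions whose codegen matches on both targets keep the shared CHECK prefix; the nxv*i64 scalar cases above diverge because rv32 must splat the 64-bit scalar through the stack (sw/sw plus vlse64.v feeding vmax.vv) while rv64 can pass it in one register to vmax.vx.
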
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
deleted file mode 100644
index 0567a21b74662..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ /dev/null
@@ -1,2122 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

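The rename that follows shows the merge pattern itself: the rv64 file becomes the shared vmaxu.ll, each XLEN-sized i64 in the test body is replaced by the placeholder type iXLen, and the RUN lines rewrite that placeholder to i32 or i64 with sed before invoking llc, so a single test body drives both triples. A minimal self-contained sketch of that pattern, assembled from the RUN lines and the first test in the diff below (the CHECK lines are the asm both targets are expected to emit for this case):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

The merged file additionally registers RV32/RV64 check prefixes (see its RUN lines below) for the cases where the two targets diverge, such as the .i64 scalar tests removed above: on riscv32 the 64-bit scalar cannot go in one GPR, so it is spilled through the stack (two sw stores plus a zero-stride vlse64.v splat) and combined with vmaxu.vv instead of using vmaxu.vx directly.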
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmaxu.ll
index 7ee7fa35cd68a..cea7f933c6ece 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,20 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmaxu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1912,22 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmaxu.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1936,20 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmaxu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1959,22 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmaxu.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1983,20 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmaxu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2006,22 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmaxu.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2030,20 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmaxu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2053,22 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vmaxu.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vmaxu.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
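
(For reference, the merged vmaxu.ll above uses the usual single-source scheme for these tests: the XLEN-sized scalar type is spelled with the placeholder type `iXLen`, and the RUN lines substitute the real type per target before handing the file to llc. The actual RUN lines are not visible in these hunks; a minimal sketch, assuming the conventional sed-based header that matches the CHECK/RV32/RV64 prefixes used above, would be:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Under that setup, output that is identical on both targets keeps the shared CHECK prefix, while cases whose codegen diverges by XLEN, such as the i64-scalar vmaxu.vx tests above, carry separate RV32-LABEL/RV64-LABEL blocks.)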

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
deleted file mode 100644
index 4fd2f65341e46..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ /dev/null
@@ -1,2074 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

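The rename below folds the rv32 and rv64 copies of the vmin test into one shared file: the XLEN-sized scalar type is spelled with an iXLen placeholder, and each RUN line pipes the source through sed so llc always sees a fully concrete type. As an illustration only (a hypothetical reduced test, not part of this commit), the riscv64 RUN line effectively hands llc IR of this shape after substitution:

  ; Illustration: what llc receives after `sed 's/iXLen/i64/g'` runs on the
  ; first vmin test in the renamed file. The vl operand and the passthru
  ; (here undef) follow the usual unmasked intrinsic signature.
  declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    i64)

  define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
  entry:
    %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
      <vscale x 1 x i8> undef,
      <vscale x 1 x i8> %0,
      <vscale x 1 x i8> %1,
      i64 %2)
    ret <vscale x 1 x i8> %a
  }

Where rv32 and rv64 codegen match, the shared CHECK prefix covers both instantiations; the separate RV32/RV64 prefixes added in the RUN lines remain available for any case whose output is XLEN-sensitive.
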
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmin.ll
index 26cc6d9ef007f..c70107b84a16e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmin.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmin.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmin.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmin.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmin.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmin.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmin.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,28 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vmin.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vmin.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
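
(Editorial note on the merged form above: the merged vmin.ll keys both configurations off the `iXLen` placeholder. Shared CHECK lines cover the cases where rv32 and rv64 codegen is identical; separate RV32/RV64 prefixes cover the i64-scalar cases, where rv32 must store the a0/a1 register pair to the stack and splat it with a zero-stride vlse64.v, while rv64 can use vmin.vx on the scalar register directly. A minimal sketch of the RUN-line convention such merged tests rely on — assuming this file follows the usual sed-substitution pattern; the actual RUN lines sit at the top of the merged file, outside the hunks quoted here:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

Substituting iXLen before llc runs lets one source file serve both triples, which is what makes deleting the near-duplicate *-rv32.ll/*-rv64.ll pairs below possible.)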

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
deleted file mode 100644
index 6bccfb2d46481..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ /dev/null
@@ -1,2074 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

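Each merged file follows the same scheme: the XLEN-dependent scalar type is spelled with the placeholder iXLen, and the RUN lines pipe the file through sed so that llc only ever parses ordinary i32 or i64 IR (iXLen is not a real LLVM type). The RV32/RV64 check prefixes are kept for the cases where codegen differs between the targets; the trailing constant operand on the .mask intrinsics selects the tail/mask policy. As a minimal sketch of a merged test in this style, built from the vminu RUN lines and the nxv1i8 case in the diff below (the CHECK body lines mirror the autogenerated pattern used throughout this file and are illustrative, not freshly regenerated output):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vminu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}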
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vminu.ll
index a71e26000e3cf..f4c67554703fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vminu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vminu.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vminu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vminu.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vminu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vminu.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vminu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,28 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vminu.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vminu.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
deleted file mode 100644
index 2b40057211f0f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
+++ /dev/null
@@ -1,882 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}

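The file removed above exercised the plain vmsbc intrinsic, which returns the borrow-out of a vector subtraction with no incoming borrow; the file removed next exercises the borrow.in variant, which takes an additional mask operand carrying the borrow-in. Side by side, the two declaration shapes differ only in that extra <vscale x N x i1> operand (both copied from the deleted rv64 files, reflowed onto fewer lines):

  declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i64)

  declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)

Because the borrow-in mask is passed in v0, the vmsbc.vvm/vmsbc.vxm CHECK lines below cannot write their result straight to v0 the way the unmasked vmsbc.vv/vmsbc.vx tests above do; instead they select a scratch register sized to the operand LMUL (v9, v10, v12, v16, or v24) and copy the result into v0 afterwards (the vmv1r.v or vmv.v.v in the CHECK lines).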
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
deleted file mode 100644
index fa3a6c68f153e..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
+++ /dev/null
@@ -1,1014 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}

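The rename below shows the merge pattern this commit applies across the listed tests: the rv32 copy becomes the shared file, the XLEN-sized scalar arguments become an iXLen placeholder, and each RUN line substitutes a concrete type with sed before piping the file to llc, so a single test body drives both targets. Pieced together from the hunks that follow, the merged file's preamble looks like this (the RV32/RV64 check prefixes are presumably there for the few cases where the two targets' output diverges, such as i64 scalar operands on rv32):

  ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
  ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
  ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
  ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
  ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
  declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    <vscale x 1 x i1>,
    iXLen)

Note that iXLen is not a real LLVM IR type, so the file is only well-formed after the sed substitution; llc never sees the placeholder itself.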
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll
index d0de4e5c2530e..bac8c916e1f91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -42,7 +44,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -88,7 +90,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -111,7 +113,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -134,7 +136,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -157,7 +159,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i1> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -180,7 +182,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -203,7 +205,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -226,7 +228,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -249,7 +251,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -272,7 +274,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -295,7 +297,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -318,7 +320,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -341,7 +343,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -387,7 +389,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -410,7 +412,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -433,7 +435,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -456,7 +458,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -479,7 +481,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -502,7 +504,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -525,7 +527,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -571,7 +573,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -594,7 +596,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -617,7 +619,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -640,7 +642,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -663,7 +665,7 @@ entry:
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i1> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -686,7 +688,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -709,7 +711,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -732,7 +734,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -755,7 +757,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -778,7 +780,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -801,7 +803,7 @@ entry:
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -824,7 +826,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -833,9 +835,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -847,7 +849,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -856,9 +858,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -870,7 +872,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -879,9 +881,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -893,7 +895,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
@@ -902,9 +904,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -916,7 +918,7 @@ entry:
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
@@ -925,27 +927,34 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsbc.vvm v9, v8, v10, v0
-; CHECK-NEXT:    vmv.v.v v0, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsbc.vvm v9, v8, v10, v0
+; RV32-NEXT:    vmv.v.v v0, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsbc.vxm v9, v8, a0, v0
+; RV64-NEXT:    vmv.v.v v0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
@@ -954,27 +963,34 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsbc.vvm v10, v8, v12, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsbc.vvm v10, v8, v12, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsbc.vxm v10, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
@@ -983,27 +999,34 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmsbc.vvm v12, v8, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmsbc.vvm v12, v8, v16, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsbc.vxm v12, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1012,27 +1035,34 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vmsbc.vvm v16, v8, v24, v0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vmsbc.vvm v16, v8, v24, v0
+; RV32-NEXT:    vmv1r.v v0, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmsbc.vxm v16, v8, a0, v0
+; RV64-NEXT:    vmv1r.v v0, v16
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
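
[Editor's note: in the merged file above, most functions codegen identically
on both targets and keep the shared CHECK prefix; only the i64-scalar (vxm)
variants split into RV32/RV64 bodies. The reason is that riscv32 has no
64-bit GPRs: the scalar arrives as the (a0, a1) register pair, is stored to
the stack, and is splat into a vector with a zero-stride vlse64.v, so
vmsbc.vxm becomes vmsbc.vvm, while riscv64 passes the scalar directly in a0.
After a codegen change, the per-prefix assertions would be refreshed with
something like the following (build-tree path assumed):

  $ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll
]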

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll
similarity index 77%
rename from llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsbc.ll
index 0eac79564ce9e..36238cef44d83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }
@@ -144,9 +146,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -156,7 +158,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -164,9 +166,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -176,7 +178,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -184,9 +186,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -196,7 +198,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -204,9 +206,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -216,7 +218,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -224,9 +226,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -236,7 +238,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -244,9 +246,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -256,7 +258,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -284,9 +286,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -296,7 +298,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -304,9 +306,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -316,7 +318,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -324,9 +326,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -336,7 +338,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -344,9 +346,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -356,7 +358,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -364,9 +366,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -376,7 +378,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -384,9 +386,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -396,7 +398,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -404,9 +406,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -416,7 +418,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -424,9 +426,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -436,7 +438,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -444,9 +446,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -456,7 +458,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -464,9 +466,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -476,7 +478,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -484,9 +486,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -496,7 +498,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -504,9 +506,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -516,7 +518,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -544,9 +546,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -556,7 +558,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -564,9 +566,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -576,7 +578,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }
@@ -584,9 +586,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -596,7 +598,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -604,9 +606,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -616,7 +618,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -624,9 +626,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -636,7 +638,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -644,9 +646,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -656,7 +658,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -664,9 +666,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -676,7 +678,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -684,9 +686,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -696,7 +698,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -704,9 +706,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -716,7 +718,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -724,9 +726,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -736,7 +738,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -744,9 +746,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -756,7 +758,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -764,9 +766,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -776,7 +778,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -804,25 +806,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsbc.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsbc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -830,25 +838,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsbc.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsbc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -856,25 +870,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsbc.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsbc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -882,25 +902,31 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmsbc.vv v0, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmsbc.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }

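For reference, the merged vmsbc.ll that replaces the deleted per-target files
drives both configurations from a single source: the test body is written in
terms of iXLen, and the RUN lines substitute the concrete XLEN type with sed
before invoking llc. FileCheck then matches the shared CHECK lines where the
output is identical on both targets and falls back to the RV32/RV64 prefixes
where codegen diverges, as in the i64 scalar cases above. A sketch of such a
header, assuming the convention of the other combined rvv tests (the exact
flags are not quoted from this commit):

; Typical header of a merged rv32/rv64 test (assumed convention, not taken
; verbatim from this commit).
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64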
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
deleted file mode 100644
index 0c0d444eace72..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmseq.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmseq.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmseq.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmseq.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmseq.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmseq.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmseq.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmseq.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmseq.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index 7184a7dc02dfd..c8c223c75d5cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmseq.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmseq.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmseq.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmseq.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmseq.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmseq.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmseq.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmseq.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmseq.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmseq.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmseq.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmseq.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmseq.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmseq.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmseq.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
deleted file mode 100644
index ad067cf5b9888..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ /dev/null
@@ -1,2757 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 -15,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -15, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 -14,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 -13,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -13, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 -12,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 -11,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -11, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 -10,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 -9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 -8,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 -7,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, -7, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 -6,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -6
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 -5,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, -5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 -4,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -4
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 -3,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -3, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 -2,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -2
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 -1,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, -1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 0,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 0,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 2,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 2, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 3,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 3
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 4,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 4, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 5,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 5
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 6,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 6, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 7,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 7
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 8,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 10,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 11,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 12,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 13,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 14,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 14, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 15,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 16,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, -16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 -15,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, -15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 -14,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, -14, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 -13,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index 2a34f94f14048..3e0c83e2794b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -953,7 +955,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -963,9 +965,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -980,7 +982,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -988,9 +990,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1001,7 +1003,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1011,9 +1013,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1028,7 +1030,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1036,9 +1038,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1049,7 +1051,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1059,9 +1061,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1084,9 +1086,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1097,7 +1099,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1107,9 +1109,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1145,7 +1147,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1155,9 +1157,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1172,7 +1174,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1180,9 +1182,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1193,7 +1195,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1203,9 +1205,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1220,7 +1222,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1228,9 +1230,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1241,7 +1243,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1268,7 +1270,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1276,9 +1278,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1289,7 +1291,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1299,9 +1301,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1324,9 +1326,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1337,7 +1339,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1347,9 +1349,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1364,7 +1366,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1372,9 +1374,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1385,7 +1387,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1395,9 +1397,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1412,7 +1414,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1420,9 +1422,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1433,7 +1435,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1443,9 +1445,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1460,7 +1462,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1468,9 +1470,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1481,7 +1483,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1491,9 +1493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1508,7 +1510,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1516,9 +1518,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1529,7 +1531,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1539,9 +1541,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1556,7 +1558,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1564,9 +1566,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1577,7 +1579,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1587,9 +1589,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1604,7 +1606,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1612,9 +1614,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1625,7 +1627,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1635,9 +1637,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1652,7 +1654,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1660,25 +1662,32 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmslt.vx v8, v8, a0
+; RV64-NEXT:    vmnot.m v0, v8
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1688,30 +1697,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vv v10, v11, v8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsle.vv v10, v11, v8, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmslt.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v10, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1719,25 +1737,32 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmslt.vx v10, v8, a0
+; RV64-NEXT:    vmnot.m v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1747,30 +1772,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vv v11, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsle.vv v11, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmslt.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v11, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1778,25 +1812,32 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmslt.vx v12, v8, a0
+; RV64-NEXT:    vmnot.m v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1806,35 +1847,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vv v13, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsle.vv v13, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmslt.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v13, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1844,12 +1894,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 -15,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1864,12 +1914,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 -14,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1879,12 +1929,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -13,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1899,12 +1949,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 -12,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1914,12 +1964,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 -11,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1934,12 +1984,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 -10,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1949,12 +1999,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1969,12 +2019,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 -8,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1984,12 +2034,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 -7,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2004,12 +2054,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 -6,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2019,12 +2069,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -5,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2039,12 +2089,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 -4,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2054,12 +2104,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -3,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2074,12 +2124,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 -2,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2089,12 +2139,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 -1,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2109,12 +2159,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 0,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2124,12 +2174,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2144,12 +2194,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2159,12 +2209,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 2,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2179,12 +2229,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 3,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2194,12 +2244,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 4,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2214,12 +2264,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 5,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2229,12 +2279,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 6,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2249,12 +2299,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 7,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2264,12 +2314,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 8,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2284,12 +2334,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2299,12 +2349,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 10,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2319,12 +2369,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 11,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2334,12 +2384,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 12,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2354,12 +2404,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 13,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2369,12 +2419,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2389,12 +2439,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2404,12 +2454,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2424,12 +2474,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2439,12 +2489,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2459,13 +2509,13 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
 ; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2478,12 +2528,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2496,12 +2546,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -2514,12 +2564,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -2532,12 +2582,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -2550,12 +2600,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -2568,12 +2618,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2586,12 +2636,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2604,12 +2654,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2622,12 +2672,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2640,12 +2690,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2658,12 +2708,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2676,12 +2726,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2694,12 +2744,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2712,12 +2762,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2730,80 +2780,101 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v9, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmslt.vx v8, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v8
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmsle.vv v10, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmsle.vv v10, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmslt.vx v10, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vmsle.vv v12, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v12, v0
+; RV32-NEXT:    vmsle.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmslt.vx v12, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

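The hunks above show the whole merge pattern for vmsge.ll: the old rv32 and rv64 copies differed only in the type of the trailing VL operand (i32 vs. i64), so the merged test spells that type as the placeholder iXLen and keeps a single body, with separate RV32/RV64 FileCheck prefixes reserved for the i64 scalar cases where the generated code genuinely diverges. The RUN lines sit outside this excerpt; they presumably follow the usual substitution idiom for such merged tests, sketched here with the -mattr=+v -verify-machineinstrs flags taken from the deleted rv64 file below (exact line wrapping assumed):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

The RV32/RV64 split is needed only for the vx tests with an i64 scalar: rv32 has no 64-bit GPR, so the scalar arrives in a register pair, is spilled to the stack, and splatted with vlse64.v before a vmsle.vv, while rv64 can feed it straight to vmslt.vx/vmandn.mm, as in the checks above.
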
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
deleted file mode 100644
index 36af3a9455a1d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ /dev/null
@@ -1,2769 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v11, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnot.m v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmxor.mm v0, v13, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 -15,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, -15, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 -14,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 -13,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, -13, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 -12,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 -11,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, -11, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 -10,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 -9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, -9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 -8,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 -7,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, -7, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 -6,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 -5,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, -5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 -4,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 -3,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, -3, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 -2,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 -1,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmor.mm v0, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 0,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 0,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 0,
-    <vscale x 2 x i1> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 2,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 2, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 3,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 4,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 4, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 5,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 6,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 6, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 7,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 8,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 10,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 11,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 12,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 13,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 14,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 14, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 15,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 16,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, -16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 -15,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 -14,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, -14, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 -13,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmandn.mm v0, v0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %0,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

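The merged file below keeps a single copy of each test: the rv32 file is renamed and every XLEN-dependent scalar type is rewritten as the placeholder iXLen, which the new RUN lines substitute per target with sed before piping the test to llc. The same substitution can be reproduced from a shell to inspect what llc actually receives for either target (the sed expression is taken verbatim from the RUN lines; the trailing head is only for illustration):

  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll | head -n 20
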
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index 7b5cd7160c90d..cfb8bd550ed0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -953,7 +955,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -963,9 +965,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -980,7 +982,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -988,9 +990,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1001,7 +1003,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1011,9 +1013,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1028,7 +1030,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1036,9 +1038,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1049,7 +1051,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1059,9 +1061,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1084,9 +1086,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1097,7 +1099,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1107,9 +1109,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1124,7 +1126,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1132,9 +1134,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1145,7 +1147,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1155,9 +1157,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1172,7 +1174,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1180,9 +1182,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1193,7 +1195,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1203,9 +1205,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1220,7 +1222,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1228,9 +1230,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1241,7 +1243,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1268,7 +1270,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1276,9 +1278,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1289,7 +1291,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1299,9 +1301,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1324,9 +1326,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1337,7 +1339,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1347,9 +1349,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1364,7 +1366,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1372,9 +1374,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1385,7 +1387,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1395,9 +1397,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1412,7 +1414,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1420,9 +1422,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1433,7 +1435,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1443,9 +1445,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1460,7 +1462,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1468,9 +1470,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1481,7 +1483,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1491,9 +1493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1508,7 +1510,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1516,9 +1518,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1529,7 +1531,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1539,9 +1541,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1556,7 +1558,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1564,9 +1566,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1577,7 +1579,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1587,9 +1589,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1604,7 +1606,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1612,9 +1614,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1625,7 +1627,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1635,9 +1637,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1652,7 +1654,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1660,25 +1662,32 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsltu.vx v8, v8, a0
+; RV64-NEXT:    vmnot.m v0, v8
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1688,30 +1697,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vv v10, v11, v8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsleu.vv v10, v11, v8, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsltu.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v10, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1719,25 +1737,32 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsltu.vx v10, v8, a0
+; RV64-NEXT:    vmnot.m v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1747,30 +1772,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vv v11, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsleu.vv v11, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsltu.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v11, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1778,25 +1812,32 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsltu.vx v12, v8, a0
+; RV64-NEXT:    vmnot.m v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1806,35 +1847,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vv v13, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsleu.vv v13, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsltu.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmxor.mm v0, v13, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1844,12 +1894,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 -15,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1864,12 +1914,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 -14,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1879,12 +1929,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -13,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1899,12 +1949,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 -12,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1914,12 +1964,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 -11,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1934,12 +1984,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 -10,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1949,12 +1999,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1969,12 +2019,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 -8,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1984,12 +2034,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 -7,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2004,12 +2054,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 -6,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2019,12 +2069,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -5,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2039,12 +2089,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 -4,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2054,12 +2104,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -3,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2074,12 +2124,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 -2,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2089,12 +2139,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 -1,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2106,12 +2156,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 0,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ret
@@ -2121,12 +2171,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 0,
     <vscale x 2 x i1> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2136,12 +2186,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2156,12 +2206,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2171,12 +2221,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 2,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2191,12 +2241,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 3,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2206,12 +2256,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 4,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2226,12 +2276,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 5,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2241,12 +2291,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 6,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2261,12 +2311,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 7,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2276,12 +2326,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 8,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2296,12 +2346,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2311,12 +2361,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 10,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2331,12 +2381,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 11,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2346,12 +2396,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 12,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2366,12 +2416,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 13,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2381,12 +2431,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 14,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2401,12 +2451,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 15,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2416,12 +2466,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 16,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2436,12 +2486,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 -15,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2451,12 +2501,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 -14,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2471,13 +2521,13 @@ entry:
     <vscale x 4 x i64> %1,
     i64 -13,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
 ; Test cases where the mask and maskedoff are the same value.
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2490,12 +2540,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2508,12 +2558,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -2526,12 +2576,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -2544,12 +2594,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -2562,12 +2612,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -2580,12 +2630,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2598,12 +2648,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2616,12 +2666,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2634,12 +2684,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2652,12 +2702,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2670,12 +2720,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2688,12 +2738,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2706,12 +2756,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2724,12 +2774,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -2742,80 +2792,101 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v9, v8, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v9, v8, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsltu.vx v8, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v8
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmsleu.vv v10, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmsleu.vv v10, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsltu.vx v10, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vmsleu.vv v12, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v12, v0
+; RV32-NEXT:    vmsleu.vv v12, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsltu.vx v12, v8, a0
+; RV64-NEXT:    vmandn.mm v0, v0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %0,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
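
The mechanism that lets one file serve both targets is the iXLen placeholder
seen throughout the hunks above: the merged tests are run through sed, which
rewrites iXLen to the target's native XLEN type before llc sees the IR. A
minimal sketch of the RUN-line pattern (the actual RUN lines sit at the top
of each merged file, outside this excerpt, so the exact flags here are
illustrative):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Functions that codegen identically on both targets keep the shared CHECK
prefix; the i64-scalar vmsgeu cases above diverge (RV32 splats the 64-bit
scalar through the stack with vlse64.v, while RV64 uses vmsltu.vx directly),
so they carry separate RV32/RV64 check bodies.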

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
deleted file mode 100644
index c86f7e41e2b36..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgt.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgt.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgt.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
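
The merged test below shows the pattern used throughout this commit: the IR
is written once with an iXLen placeholder for the scalar XLEN type, and the
RUN lines substitute it textually before handing the file to llc, so a
single copy serves both targets. A minimal sketch of driving one of these
merged tests by hand, mirroring the RUN lines (paths assumed to be run from
the llvm source root):

  # Instantiate the XLEN-parameterized test for each target and check it.
  sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vmsgt.ll \
    | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vmsgt.ll --check-prefixes=CHECK,RV32
  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vmsgt.ll \
    | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vmsgt.ll --check-prefixes=CHECK,RV64

Cases whose codegen genuinely differs between targets, such as the i64
scalar forms that must go through the stack on rv32, carry RV32/RV64 check
prefixes instead of the shared CHECK prefix, as seen near the end of the
vmsgt.ll diff below.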

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index fc820e09850fc..bde1030c55ead 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsgt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vv v10, v11, v8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmslt.vv v10, v11, v8, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsgt.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsgt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vv v11, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmslt.vv v11, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsgt.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsgt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vv v13, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmslt.vv v13, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsgt.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
deleted file mode 100644
index 049450e5500c2..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v9, v8
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v10, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v12, v8
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsgtu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsgtu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsgtu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index cec42c16f4bed..3416060b670e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v9, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsgtu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vv v10, v11, v8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsltu.vv v10, v11, v8, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v10, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsgtu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vv v11, v12, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsltu.vv v11, v12, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v12, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsgtu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vv v13, v16, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsltu.vv v13, v16, v8, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
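
The merged vmsgtu.ll above exercises both targets from a single source file. Its RUN lines fall outside this excerpt, but these sed-based merged tests conventionally rewrite the iXLen placeholder per target before invoking llc, roughly along these lines (prefixes matching the CHECK/RV32/RV64 labels above; the flags are carried over from the deleted per-target files and the exact merged header should be treated as an assumption):

; NOTE: illustrative RUN lines; the actual merged file's header is not shown in this diff excerpt.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Bodies that codegen identically on both targets keep a shared CHECK block (the vmsgtu_vi tests above), while the i64 scalar-operand tests split into RV32/RV64 blocks: RV32 has no 64-bit GPR, so it stores the two halves of the scalar to the stack and splats the value with a zero-stride vlse64.v instead of emitting vmsgtu.vx directly.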

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
deleted file mode 100644
index 2d2e26183f0c0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ /dev/null
@@ -1,2450 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsle.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsle.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsle.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsle.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsle.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsle.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index 58f33dc872aef..1a02131f73035 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
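
The .vx mask tests that start here make the v0 shuffling visible in the checks: v0 is the architecturally fixed mask register, so a masked compare that consumes a control mask while merging into an existing result mask has to move both values through it. An annotated restatement of the pattern (register numbers and the vsetvli operands vary with element width and LMUL; the comments are explanatory additions, not generated output):

  ; vmv1r.v  v10, v0            ; save the merge operand (incoming result mask)
  ; vsetvli  zero, a1, e8, mf8, ta, mu  ; mu: inactive elements keep old values
  ; vmv1r.v  v0, v9             ; install the control mask into v0
  ; vmsle.vx v10, v8, a0, v0.t  ; masked compare, merging into v10
  ; vmv.v.v  v0, v10            ; move the result back into v0 to return it

Note the ta, mu policy here against ta, ma in the unmasked tests: mask-undisturbed is what makes the merge semantics observable.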
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,19 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsle.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
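
This nxv1i64 test is the first point where the two targets' code actually diverges, which is why the single CHECK block splits into RV32 and RV64 blocks. On RV64 the i64 scalar fits in one GPR and vmsle.vx is emitted directly. On RV32 it arrives as the register pair a0/a1, so the lowering spills the pair to the stack and splats it with a zero-stride strided load, turning the .vx compare into a .vv one. The RV32 block above, restated with explanatory comments:

  ; addi     sp, sp, -16     ; carve out a 16-byte stack slot
  ; sw       a1, 12(sp)      ; store the high half of the i64 scalar
  ; sw       a0, 8(sp)       ; store the low half
  ; addi     a0, sp, 8       ; address of the reassembled 8-byte value
  ; vsetvli  zero, a2, e64, m1, ta, ma
  ; vlse64.v v9, (a0), zero  ; stride 0: every element loads the same i64
  ; vmsle.vv v0, v8, v9      ; compare against the splat instead of .vx
  ; addi     sp, sp, 16      ; release the stack slot

Also note the VL operand moves from a1 to a2 on RV32, since the i64 scalar now occupies two argument registers.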
@@ -1667,24 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsle.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsle.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1692,19 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsle.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1714,24 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsle.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsle.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1739,19 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsle.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsle.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1761,29 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsle.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsle.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
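
The remaining tests exercise the immediate (.vi) form: the comparison constant, 9 throughout, is a 5-bit signed immediate encoded in the instruction itself, so no scalar GPR is involved and the output should be identical on both targets; that is presumably why these functions keep the plain CHECK prefix even for the i64-element variants in the full file. The vmsle.vi line itself falls outside the hunks shown, so the expected body below is a sketch, not a quote:

  ; CHECK:       # %bb.0: # %entry
  ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
  ; CHECK-NEXT:    vmsle.vi v0, v8, 9
  ; CHECK-NEXT:    ret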
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1793,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1813,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1828,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1848,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1863,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1883,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1898,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1918,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1933,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1953,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -1968,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1988,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2003,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2023,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2038,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2058,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2073,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2093,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2108,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2128,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2143,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2163,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2178,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2198,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2213,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2233,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2248,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2268,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2283,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2303,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2318,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2338,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2353,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2373,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2388,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2408,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
deleted file mode 100644
index f933c991f389a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsleu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsleu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsleu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsleu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsleu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsleu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

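The rename below is representative of how each rv32/rv64 pair in this commit was merged: the rv32 copy becomes the shared test, every XLEN-sized `i32` in it is rewritten to the placeholder type `iXLen`, and the new RUN lines instantiate that placeholder per target with sed before feeding the IR to llc. A rough standalone rendition of the two RUN lines, with lit's %s expanded to an illustrative file name:

  # RV32 instance: substitute i32 for the iXLen placeholder, compile for
  # riscv32, and match llc's output against the CHECK/RV32 lines kept in
  # the unsubstituted source file (the check lines contain no iXLen).
  sed 's/iXLen/i32/g' vmsleu.ll \
    | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
    | FileCheck vmsleu.ll --check-prefixes=CHECK,RV32

  # RV64 instance: the same source, with iXLen rewritten to i64 instead.
  sed 's/iXLen/i64/g' vmsleu.ll \
    | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
    | FileCheck vmsleu.ll --check-prefixes=CHECK,RV64
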
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index 6580b5a261df9..cc17def1b69e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsleu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsleu.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsleu.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsleu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsleu.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsleu.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsleu.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsleu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsleu.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsleu.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
deleted file mode 100644
index 14a134056df6b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmslt.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmslt.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmslt.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmslt.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmslt.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmslt.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 -15,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, -15, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 -14,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 -13,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, -13, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 -12,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 -11,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, -11, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 -10,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 -9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, -9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 -8,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 -7,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, -7, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 -6,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -6
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 -5,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, -5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 -4,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -4
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 -3,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, -3, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 -2,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -2
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 -1,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vx v10, v8, zero, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 0,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmslt.vx v0, v8, zero
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 0,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 2,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 2, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 3,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 3
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 4,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 4, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 5,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 5
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 6,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 6, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 7,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 7
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 8,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 10,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, 10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 11,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 12,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, 12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 13,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 14,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsle.vi v10, v8, 14, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 15,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, 15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 16,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsle.vi v11, v8, -16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 -15,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsle.vi v0, v8, -15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 -14,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsle.vi v13, v8, -14, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 -13,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

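Note: the merged file below uses the placeholder type `iXLen` for every scalar whose width tracks XLen; each RUN line rewrites the placeholder with sed before the IR reaches llc, so a single source file covers both riscv32 and riscv64. A minimal sketch of that expansion, using a declaration taken from the merged file:

  ; as written in vmslt.ll
  declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

  ; after sed 's/iXLen/i32/g' on the riscv32 RUN line
  declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

  ; after sed 's/iXLen/i64/g' on the riscv64 RUN line
  declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i64)
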
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index 9be3ee4e68f86..ab56392226c96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmslt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmslt.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmslt.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmslt.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmslt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmslt.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmslt.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmslt.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmslt.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmslt.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmslt.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmslt.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmslt.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 -15,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 -14,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -13,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 -12,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 -11,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 -10,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 -8,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 -7,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 -6,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -5,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 -4,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -3,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 -2,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 -1,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 0,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 2,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 3,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 4,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 5,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 6,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 7,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 8,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 10,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 11,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 12,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 13,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
deleted file mode 100644
index 87e0f9ad4825b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ /dev/null
@@ -1,2414 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsltu.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsltu.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsltu.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsltu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsltu.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsltu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 -15,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, -15, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 -14,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 -13,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, -13, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 -12,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 -11,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, -11, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 -10,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 -9,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, -9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 -8,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 -7,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, -7, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 -6,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -6
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 -5,
-    i64 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, -5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 -4,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -4
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 -3,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, -3, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 -2,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -2
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 -1,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vx v10, v8, zero, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 0,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsltu.vx v0, v8, zero
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 0,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 2,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 2, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 3,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 3
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 4,
-    i64 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 4, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 5,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 5
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 6,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 6, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 7,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 7
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 8,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 8, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 10,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, 10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 11,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 12,
-    i64 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, 12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 13,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 14,
-    i64 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsleu.vi v10, v8, 14, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 15,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, 15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 16,
-    i64 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsleu.vi v11, v8, -16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 -15,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsleu.vi v0, v8, -15
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 -14,
-    i64 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsleu.vi v13, v8, -14, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 -13,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i1> %a
-}

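The mechanism behind each of these merges is visible in the rename that follows: the rv32 copy becomes the combined file, the hard-coded i32 type of the trailing VL operand is replaced with an iXLen placeholder, and two lit RUN lines pipe the source through sed to materialize i32 for riscv32 and i64 for riscv64 before handing it to llc. As a sketch, with %s written out as a literal file name purely for illustration, the two RUN lines expand to roughly:

  # riscv32: rewrite the VL-type placeholder to i32, compile, and check
  sed 's/iXLen/i32/g' vmsltu.ll | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
    | FileCheck vmsltu.ll --check-prefixes=CHECK,RV32
  # riscv64: the same test body, with iXLen rewritten to i64
  sed 's/iXLen/i64/g' vmsltu.ll | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
    | FileCheck vmsltu.ll --check-prefixes=CHECK,RV64

Because the generated assembly is almost always identical on both targets, the shared CHECK prefix covers nearly every assertion, while the RV32 and RV64 prefixes remain available for the few places the targets diverge (presumably vx tests with 64-bit scalar operands, which rv32 has to materialize differently).
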
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index 009d196740c50..f3d0ca355d5ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,25 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsltu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1673,30 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsltu.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsltu.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsltu.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1704,25 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsltu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1732,30 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsltu.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsltu.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsltu.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1763,25 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsltu.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsltu.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1791,35 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsltu.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsltu.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsltu.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1829,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 -15,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1849,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 -14,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1864,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -13,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1884,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 -12,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1899,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 -11,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1919,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 -10,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1934,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1954,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 -8,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1969,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 -7,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1989,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 -6,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2004,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -5,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2024,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 -4,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2039,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -3,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2059,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 -2,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2074,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 -1,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2094,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 0,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2109,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2129,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2144,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 2,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2164,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 3,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2179,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 4,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2199,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 5,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2214,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 6,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2234,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 7,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2249,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 8,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2269,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2284,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 10,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2304,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 11,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2319,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 12,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2339,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 13,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2354,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 14,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2374,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 15,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2389,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 16,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2409,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 -15,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2424,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 -14,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2444,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 -13,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }

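For reference, the merged files use a placeholder integer type, iXLen, for the
VL operand. iXLen is not a real IR type; the RUN lines rewrite it to the
target's XLEN before llc ever parses the file. A minimal sketch of that
pattern follows, assuming the same sed-based RUN lines as the other merged
tests in this change (the actual vmsltu.ll RUN lines fall outside the hunks
shown here, so the exact prefixes are illustrative):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; iXLen becomes i32 under riscv32 and i64 under riscv64, so a single copy
; of each declaration and test body serves both targets:
declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

Output that is identical on both targets stays under the common CHECK prefix;
only the i64 scalar-operand cases need separate RV32-/RV64-prefixed bodies,
because riscv32 must splat the 64-bit scalar through the stack (the vlse64.v
sequences above) while riscv64 can use vmsltu.vx directly.
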
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
deleted file mode 100644
index 945153302bbdc..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ /dev/null
@@ -1,2450 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i8> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i8> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i8> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i8> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsne.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i8> %3,
-    <vscale x 16 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsne.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i32 %4)
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i8> %3,
-    <vscale x 32 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i16> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i16> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i16> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmsne.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i16> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmsne.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i32 %4)
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i16> %3,
-    <vscale x 16 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i32> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i32> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmsne.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i32> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmsne.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %4)
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i32> %3,
-    <vscale x 8 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v8, v8, v9
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vmsne.vv v11, v9, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i64> %3,
-    <vscale x 1 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmsne.vv v14, v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v14
-; CHECK-NEXT:    vmsne.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i64> %3,
-    <vscale x 2 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmsne.vv v20, v8, v12
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v20
-; CHECK-NEXT:    vmsne.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %4)
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i64> %3,
-    <vscale x 4 x i1> %mask,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v11, (a0), zero
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vv v10, v8, v11, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vv v11, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vv v13, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 32 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vi v10, v8, 9, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vi v11, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i1> %a
-}
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vi v13, v8, 9, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index 600881a68fbd4..ea7ca05f85e53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -26,9 +28,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -42,13 +44,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i8> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -68,7 +70,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -94,13 +96,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i8> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -108,9 +110,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -120,7 +122,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -130,9 +132,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -146,13 +148,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i8> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -160,9 +162,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -172,7 +174,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -182,9 +184,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -198,13 +200,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i8> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -212,9 +214,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -224,7 +226,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -234,9 +236,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -250,13 +252,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i8> %3,
     <vscale x 16 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -264,9 +266,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -276,7 +278,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -286,9 +288,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -302,13 +304,13 @@ entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i8> %3,
     <vscale x 32 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -316,9 +318,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -328,7 +330,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -338,9 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -354,13 +356,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i16> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -368,9 +370,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -380,7 +382,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -406,13 +408,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i16> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -420,9 +422,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -458,13 +460,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i16> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -472,9 +474,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -484,7 +486,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -494,9 +496,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -510,13 +512,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i16> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -562,13 +564,13 @@ entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i16> %3,
     <vscale x 16 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -588,7 +590,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -598,9 +600,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -614,13 +616,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i32> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -628,9 +630,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -640,7 +642,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -650,9 +652,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -666,13 +668,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i32> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -680,9 +682,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -692,7 +694,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -718,13 +720,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i32> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -732,9 +734,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -744,7 +746,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -754,9 +756,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -770,13 +772,13 @@ entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i32> %3,
     <vscale x 8 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -784,9 +786,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -796,7 +798,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -822,13 +824,13 @@ entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i64> %3,
     <vscale x 1 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -836,9 +838,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -848,7 +850,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -858,9 +860,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -874,13 +876,13 @@ entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i64> %3,
     <vscale x 2 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -888,9 +890,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -900,7 +902,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -910,9 +912,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -926,13 +928,13 @@ entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i64 %4)
+    iXLen %4)
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i64> %3,
     <vscale x 4 x i1> %mask,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -940,9 +942,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -952,7 +954,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -962,9 +964,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -979,7 +981,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -987,9 +989,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -999,7 +1001,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1026,7 +1028,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1034,9 +1036,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1046,7 +1048,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1056,9 +1058,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1073,7 +1075,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1081,9 +1083,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1093,7 +1095,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1103,9 +1105,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1120,7 +1122,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1128,9 +1130,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1140,7 +1142,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1150,9 +1152,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1167,7 +1169,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1175,9 +1177,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1187,7 +1189,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1197,9 +1199,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 32 x i1> %a
 }
@@ -1222,9 +1224,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1234,7 +1236,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1244,9 +1246,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1261,7 +1263,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1269,9 +1271,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1281,7 +1283,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1291,9 +1293,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1308,7 +1310,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1316,9 +1318,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1328,7 +1330,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1338,9 +1340,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1355,7 +1357,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1363,9 +1365,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1375,7 +1377,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1385,9 +1387,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1410,9 +1412,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1422,7 +1424,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1432,9 +1434,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1449,7 +1451,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 16 x i1> %a
 }
@@ -1457,9 +1459,9 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1469,7 +1471,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1479,9 +1481,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1496,7 +1498,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1504,9 +1506,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1516,7 +1518,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1526,9 +1528,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1543,7 +1545,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1551,9 +1553,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1563,7 +1565,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1573,9 +1575,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1590,7 +1592,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1598,9 +1600,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1610,7 +1612,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1620,9 +1622,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1637,7 +1639,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 8 x i1> %a
 }
@@ -1645,19 +1647,31 @@ entry:
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vmsne.vv v0, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmsne.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1667,24 +1681,39 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v10
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v11, (a0), zero
+; RV32-NEXT:    vmv1r.v v10, v0
+; RV32-NEXT:    vmv1r.v v0, v9
+; RV32-NEXT:    vmsne.vv v10, v8, v11, v0.t
+; RV32-NEXT:    vmv.v.v v0, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v10, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v9
+; RV64-NEXT:    vmsne.vx v10, v8, a0, v0.t
+; RV64-NEXT:    vmv.v.v v0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i1> %a
 }
@@ -1692,19 +1721,31 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vmsne.vv v0, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vmsne.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1714,24 +1755,39 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v11, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmsne.vx v11, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmv1r.v v11, v0
+; RV32-NEXT:    vmv1r.v v0, v10
+; RV32-NEXT:    vmsne.vv v11, v8, v12, v0.t
+; RV32-NEXT:    vmv1r.v v0, v11
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v11, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v10
+; RV64-NEXT:    vmsne.vx v11, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v11
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i1> %a
 }
@@ -1739,19 +1795,31 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmsne.vv v0, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vmsne.vx v0, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -1761,29 +1829,44 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v13, v0
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmsne.vx v13, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vmv1r.v v13, v0
+; RV32-NEXT:    vmv1r.v v0, v12
+; RV32-NEXT:    vmsne.vv v13, v8, v16, v0.t
+; RV32-NEXT:    vmv1r.v v0, v13
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vmv1r.v v13, v0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vmsne.vx v13, v8, a0, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1793,12 +1876,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1813,12 +1896,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1828,12 +1911,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1848,12 +1931,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1863,12 +1946,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1883,12 +1966,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1898,12 +1981,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -1918,12 +2001,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -1933,12 +2016,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -1953,12 +2036,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -1968,12 +2051,12 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -1988,12 +2071,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2003,12 +2086,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2023,12 +2106,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2038,12 +2121,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2058,12 +2141,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2073,12 +2156,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2093,12 +2176,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2108,12 +2191,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2128,12 +2211,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2143,12 +2226,12 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2163,12 +2246,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2178,12 +2261,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2198,12 +2281,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2213,12 +2296,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2233,12 +2316,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2248,12 +2331,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2268,12 +2351,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2283,12 +2366,12 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2303,12 +2386,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2318,12 +2401,12 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -2338,12 +2421,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2353,12 +2436,12 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v11, v0
@@ -2373,12 +2456,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2388,12 +2471,12 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i1> %a
 }
 
-define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v13, v0
@@ -2408,7 +2491,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i1> %a
 }
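
(Context for readers following the diff: iXLen is not a real LLVM IR type, so the merged tests only compile after a textual substitution. The usual pattern for these merged rvv tests, shown here as a sketch since the new files' RUN lines are not quoted in this hunk and the exact -mattr flags are assumed from the deleted files, is a pair of sed-driven RUN lines at the top of each merged file:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Cases whose codegen is identical on both targets keep the shared CHECK prefix, while the i64-scalar cases above split into RV32 bodies, which pass the 64-bit scalar through the stack and splat it with vlse64.v before a .vv compare, and RV64 bodies, which use the .vx form directly.)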

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
deleted file mode 100644
index ff86a87b8cb86..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
+++ /dev/null
@@ -1,1694 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
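
For readers skimming the rename diff below: the merged file keeps a single test body and lets the RUN lines specialize it per target — sed rewrites the placeholder type iXLen into i32 (riscv32) or i64 (riscv64) before llc ever parses the input, so iXLen itself never reaches the IR parser. A minimal sketch of that pattern, reusing the unmasked vnmsac intrinsic from this diff (the function name @sketch and the comments are illustrative; the RUN lines and declaration mirror the merged test):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; After sed runs, every iXLen below is a concrete integer type, so the
; same IR exercises both XLEN variants.
declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)

define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs1, <vscale x 1 x i8> %vs2, iXLen %avl) nounwind {
; CHECK-LABEL: sketch:
; CHECK: vnmsac.vv v8, v9, v10
entry:
  ; vnmsac: vd[i] = -(vs1[i] * vs2[i]) + vd[i]; the trailing iXLen 0 is
  ; the policy operand, matching the tests in this commit.
  %r = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %vd,
    <vscale x 1 x i8> %vs1,
    <vscale x 1 x i8> %vs2,
    iXLen %avl, iXLen 0)
  ret <vscale x 1 x i8> %r
}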

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vnmsac.ll
index 4a8fb8d9e1550..563d6c51d0e44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -748,7 +750,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -795,7 +797,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -842,7 +844,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1415,10 +1417,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1429,7 +1431,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1439,9 +1441,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1453,7 +1455,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1462,10 +1464,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1476,7 +1478,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1486,9 +1488,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1500,7 +1502,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1509,10 +1511,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1533,9 +1535,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1547,7 +1549,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1556,28 +1558,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v10, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64>  @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT:    vnmsac.vv v8, v10, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vnmsac.vx v8, a0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1587,28 +1595,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v10, v9, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT:    vnmsac.vv v8, v10, v9, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT:    vnmsac.vx v8, a0, v9, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1617,28 +1631,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v12, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64>  @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT:    vnmsac.vv v8, v12, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vnmsac.vx v8, a0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1648,28 +1668,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT:    vnmsac.vv v8, v12, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT:    vnmsac.vx v8, a0, v10, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1678,28 +1704,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsac.vv v8, v16, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64>  @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT:    vnmsac.vv v8, v16, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vnmsac.vx v8, a0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1709,28 +1741,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsac.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT:    vnmsac.vv v8, v16, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT:    vnmsac.vx v8, a0, v12, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }

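(The hunks above rewrite the fixed-width VL/policy operands to the iXLen placeholder and split the divergent e64-scalar cases into RV32/RV64 check prefixes. The merged file can then drive both targets from one source by substituting iXLen before compilation; a minimal sketch of the typical RUN-line convention for these merged rvv tests follows, with the exact -mattr string an assumption here:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

Output that is identical on both targets stays under the shared CHECK prefix; only the i64-scalar forms diverge, since riscv32 passes the i64 operand in a register pair and must spill and splat it with a zero-stride vlse64.v, while riscv64 selects vnmsac.vx directly.)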
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
deleted file mode 100644
index da77b800f5bc8..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
+++ /dev/null
@@ -1,1694 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vnmsub.ll
index c24c63f6caf57..4007702b3a150 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -748,7 +750,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -795,7 +797,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -842,7 +844,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i8>  @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i8>  @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i8>  @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i8>  @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i8>  @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i8>  @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1415,10 +1417,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1429,7 +1431,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1439,9 +1441,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1453,7 +1455,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1462,10 +1464,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1476,7 +1478,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1486,9 +1488,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1500,7 +1502,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1509,10 +1511,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1533,9 +1535,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1547,7 +1549,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1556,28 +1558,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v10, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64>  @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT:    vnmsub.vv v8, v10, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT:    vnmsub.vx v8, a0, v9
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1587,28 +1595,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v10, v9, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
+; RV32-NEXT:    vnmsub.vv v8, v10, v9, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
+; RV64-NEXT:    vnmsub.vx v8, a0, v9, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1617,28 +1631,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v12, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64>  @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
+; RV32-NEXT:    vnmsub.vv v8, v12, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; RV64-NEXT:    vnmsub.vx v8, a0, v10
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1648,28 +1668,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v12, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT:    vnmsub.vv v8, v12, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
+; RV64-NEXT:    vnmsub.vx v8, a0, v10, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1678,28 +1704,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT:    vnmsub.vv v8, v16, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64>  @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
+; RV32-NEXT:    vnmsub.vv v8, v16, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; RV64-NEXT:    vnmsub.vx v8, a0, v12
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1709,28 +1741,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; CHECK-NEXT:    vnmsub.vv v8, v16, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
+; RV32-NEXT:    vnmsub.vv v8, v16, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
+; RV64-NEXT:    vnmsub.vx v8, a0, v12, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }

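The hunks above show the merge pattern: the XLEN-sized i32 operands become the
iXLen placeholder, and only the i64-scalar cases, where RV32 has to splat the
GPR pair through the stack with vlse64.v while RV64 can use vnmsub.vx directly,
split into RV32-/RV64-prefixed check blocks. A sketch of the RUN lines that
typically drive such a merged file (the exact -mattr flags here are an
assumption, not quoted from this commit):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

sed materializes the shared file for each target before llc runs, so checks
common to both targets keep the plain CHECK prefix and only the divergent
codegen needs per-target prefixes.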
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
deleted file mode 100644
index 14dddc9612042..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ /dev/null
@@ -1,2848 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 9,
-    i32 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 9,
-    <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 9,
-    i32 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

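[Note on the merge pattern, inserted between the two file diffs: the rv64 copy
of each test becomes the shared file, every XLEN-sized `i64` in its signatures
is rewritten to an `iXLen` placeholder, and the RUN lines pipe the source
through sed so a single file drives both riscv32 and riscv64. A minimal sketch
of the resulting layout, using the vor declaration from the hunk below (the
CHECK bodies themselves are regenerated by update_llc_test_checks.py):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

Codegen that is identical on both targets stays under the shared CHECK prefix;
the RV32/RV64 prefixes remain for the cases that diverge, such as the vx tests
with an i64 scalar deleted above, where rv32 has no 64-bit GPR and so spills
the two halves to the stack (sw a1 / sw a0) and splats them with a zero-stride
vlse64.v, while rv64 can feed the scalar to vor.vx directly.]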
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vor.ll
index 0c9353c083946..6229e09c3a3dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,20 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vor.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1912,22 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vor.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vor.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1936,20 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vor.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1959,22 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vor.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vor.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1983,20 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vor.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2006,22 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vor.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2030,20 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vor.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2053,27 +2139,39 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vor.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vor.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2084,12 +2182,12 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2101,12 +2199,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2117,12 +2215,12 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2134,12 +2232,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2150,12 +2248,12 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2167,12 +2265,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2183,12 +2281,12 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2200,12 +2298,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2216,12 +2314,12 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2233,12 +2331,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2249,12 +2347,12 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2266,12 +2364,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2282,12 +2380,12 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2299,12 +2397,12 @@ entry:
     <vscale x 64 x i8> %1,
     i8 9,
     <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2315,12 +2413,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2332,12 +2430,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2348,12 +2446,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2365,12 +2463,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2381,12 +2479,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2398,12 +2496,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2414,12 +2512,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2431,12 +2529,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2447,12 +2545,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2464,12 +2562,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2480,12 +2578,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2497,12 +2595,12 @@ entry:
     <vscale x 32 x i16> %1,
     i16 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2513,12 +2611,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2530,12 +2628,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2546,12 +2644,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2563,12 +2661,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2579,12 +2677,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2596,12 +2694,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2612,12 +2710,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2629,12 +2727,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2645,12 +2743,12 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2662,12 +2760,12 @@ entry:
     <vscale x 16 x i32> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2678,12 +2776,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2695,12 +2793,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2711,12 +2809,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2728,12 +2826,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2744,12 +2842,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2761,12 +2859,12 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2777,12 +2875,12 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2794,7 +2892,7 @@ entry:
     <vscale x 8 x i64> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
deleted file mode 100644
index 64f6bb4b99798..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ /dev/null
@@ -1,2122 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vrem.ll
index afca2626c7f75..30b40f4113fe6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,20 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
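
The i64 scalar (vx) tests here are the only ones in the file whose RV32 and
RV64 expectations diverge, which is why they switch from a shared CHECK prefix
to separate RV32/RV64 prefixes. RV32 has no 64-bit GPRs, so the i64 scalar
argument arrives split across a0/a1; the expected code stores the pair to the
stack and re-reads it with a zero-stride vlse64.v, splatting the one 64-bit
memory element across the whole vector, after which the vector-vector form
vrem.vv stands in for vrem.vx. The splat idiom from the CHECK lines above,
with editorial comments:

  sw a1, 12(sp)              ; spill high half of the i64 pair
  sw a0, 8(sp)               ; spill low half; sp+8 now holds the full i64
  addi a0, sp, 8             ; a0 = address of the reassembled scalar
  vlse64.v v9, (a0), zero    ; stride register x0: every element loads the
                             ; same 64-bit value, i.e. a scalar splat
  vrem.vv v8, v8, v9         ; vector-vector remainder replaces vrem.vx
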
@@ -1912,22 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vrem.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1936,20 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1959,22 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vrem.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1983,20 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2006,22 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vrem.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2030,20 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vrem.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2053,22 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vrem.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vrem.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
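
How one merged file now serves both targets: iXLen is not a real LLVM type
but a placeholder that the RUN lines rewrite with sed before handing the file
to llc, once per target. The merged files in this series are driven by RUN
lines of roughly this shape (abbreviated here; the exact flags are in the
file headers of the commit itself):

  ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
  ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
  ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
  ; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

Tests whose output matches on both targets keep shared CHECK lines; only the
i64 scalar tests above, where codegen differs, carry per-target prefixes.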

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
deleted file mode 100644
index eeb71ec6a57b2..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ /dev/null
@@ -1,2074 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

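The rename below illustrates the merge technique used throughout this commit: the unified test spells every XLEN-dependent scalar type as the placeholder iXLen, and each RUN line rewrites that placeholder with sed before piping the file to llc, so one test body serves both riscv32 and riscv64. As a minimal sketch of exercising one such configuration by hand, assuming llc and FileCheck from an LLVM build are on PATH, mirroring the RV64 RUN line shown in the hunk below:

  sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vremu.ll \
    | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
    | FileCheck llvm/test/CodeGen/RISCV/rvv/vremu.ll --check-prefixes=CHECK,RV64

The lit runner performs the same pipeline for the riscv32 RUN line, substituting s/iXLen/i32/g and the prefixes CHECK,RV32.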
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vremu.ll
index 048333fc78fcc..39cd411d14119 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vremu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vremu.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vremu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vremu.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vremu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vremu.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vremu.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,28 +2139,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vremu.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vremu.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
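
The vremu hunks above show the merge pattern this commit applies file by file: the shared test spells every XLEN-sized operand (the VL and policy arguments) as the placeholder type iXLen, each RUN line rewrites the placeholder with sed before piping the file to llc, and separate RV32/RV64 check prefixes appear only where codegen genuinely diverges, i.e. the i64 scalar-operand tests, where rv32 has to spill the scalar pair to the stack and splat it back with vlse64.v. A minimal sketch of a test written in this shared style, using llvm.riscv.vadd purely as an illustrative intrinsic (it is not part of this diff):

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

; The VL argument is the only iXLen value here, so both targets emit the
; same code and the plain CHECK prefix suffices; RV32/RV64 prefixes are
; only needed for cases like the i64 vremu.vx tests above.
define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}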

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
deleted file mode 100644
index 77ebd753296b0..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
+++ /dev/null
@@ -1,1058 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
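
The vsbc merge below follows the same recipe, but git records it differently: the rv64 copy above is deleted outright, while the rv32 copy is renamed to vsbc.ll (82% similarity) and parameterized, so its diff reduces to the new RUN lines plus mechanical i32 -> iXLen substitutions. After a merge like this the autogenerated assertions for both RUN lines can be refreshed in one pass; a typical invocation, assuming a standard in-tree build directory named build:

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vsbc.ll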

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsbc.ll
index eefe37dafb24e..6ec17c0430a3d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -20,7 +22,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i8> %a
 }
@@ -30,9 +32,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -44,7 +46,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i8> %a
 }
@@ -54,9 +56,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -68,7 +70,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i8> %a
 }
@@ -78,9 +80,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -92,7 +94,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i8> %a
 }
@@ -102,9 +104,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -116,7 +118,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i8> %a
 }
@@ -126,9 +128,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -140,7 +142,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i8> %a
 }
@@ -150,9 +152,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -164,7 +166,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i8> %a
 }
@@ -174,9 +176,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -188,7 +190,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
@@ -198,9 +200,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -212,7 +214,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
@@ -222,9 +224,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -236,7 +238,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -246,9 +248,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -260,7 +262,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
@@ -270,9 +272,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -284,7 +286,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
@@ -294,9 +296,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -308,7 +310,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
@@ -318,9 +320,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
@@ -342,9 +344,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -356,7 +358,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -366,9 +368,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -380,7 +382,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -404,7 +406,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
@@ -414,9 +416,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -428,7 +430,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
@@ -438,9 +440,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -452,7 +454,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -462,9 +464,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -476,7 +478,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
@@ -486,9 +488,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -500,7 +502,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
@@ -510,9 +512,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -524,7 +526,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i8> %a
 }
@@ -558,9 +560,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -572,7 +574,7 @@ entry:
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i8> %a
 }
@@ -582,9 +584,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -596,7 +598,7 @@ entry:
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i8> %a
 }
@@ -606,9 +608,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -620,7 +622,7 @@ entry:
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i8> %a
 }
@@ -630,9 +632,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -644,7 +646,7 @@ entry:
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i8> %a
 }
@@ -654,9 +656,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -668,7 +670,7 @@ entry:
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i8> %a
 }
@@ -678,9 +680,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -692,7 +694,7 @@ entry:
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i8> %a
 }
@@ -702,9 +704,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -716,7 +718,7 @@ entry:
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
@@ -726,9 +728,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -740,7 +742,7 @@ entry:
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
@@ -750,9 +752,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -764,7 +766,7 @@ entry:
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -774,9 +776,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -788,7 +790,7 @@ entry:
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
@@ -798,9 +800,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -812,7 +814,7 @@ entry:
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
@@ -822,9 +824,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -836,7 +838,7 @@ entry:
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
@@ -846,9 +848,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -860,7 +862,7 @@ entry:
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
@@ -870,9 +872,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -884,7 +886,7 @@ entry:
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -894,9 +896,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -908,7 +910,7 @@ entry:
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
@@ -918,9 +920,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -932,7 +934,7 @@ entry:
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
@@ -942,9 +944,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
@@ -966,27 +968,33 @@ declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vsbc.vvm v8, v8, v9, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -996,27 +1004,33 @@ declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsbc.vvm v8, v8, v10, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1026,27 +1040,33 @@ declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsbc.vvm v8, v8, v12, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1056,27 +1076,33 @@ declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vsbc.vvm v8, v8, v16, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
deleted file mode 100644
index 29772e3c008bd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ /dev/null
@@ -1,458 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x half> %va, <vscale x 1 x half> %vb
-  ret <vscale x 1 x half> %vc
-}
-
-define <vscale x 1 x half> @vfmerge_fv_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x half> %splat, <vscale x 1 x half> %va
-  ret <vscale x 1 x half> %vc
-}
-
-define <vscale x 2 x half> @vfmerge_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x half> %va, <vscale x 2 x half> %vb
-  ret <vscale x 2 x half> %vc
-}
-
-define <vscale x 2 x half> @vfmerge_fv_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x half> %splat, <vscale x 2 x half> %va
-  ret <vscale x 2 x half> %vc
-}
-
-define <vscale x 4 x half> @vfmerge_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x half> %va, <vscale x 4 x half> %vb
-  ret <vscale x 4 x half> %vc
-}
-
-define <vscale x 4 x half> @vfmerge_fv_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x half> %splat, <vscale x 4 x half> %va
-  ret <vscale x 4 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
-  ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_fv_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
-  ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vfmerge_zv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
-  ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
-; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
-  ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
-; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
-  ret <vscale x 8 x half> %vc
-}
-
-define <vscale x 16 x half> @vfmerge_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x half> %va, <vscale x 16 x half> %vb
-  ret <vscale x 16 x half> %vc
-}
-
-define <vscale x 16 x half> @vfmerge_fv_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x half> %splat, <vscale x 16 x half> %va
-  ret <vscale x 16 x half> %vc
-}
-
-define <vscale x 32 x half> @vfmerge_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x half> %va, <vscale x 32 x half> %vb
-  ret <vscale x 32 x half> %vc
-}
-
-define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
-  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x half> %splat, <vscale x 32 x half> %va
-  ret <vscale x 32 x half> %vc
-}
-
-define <vscale x 1 x float> @vfmerge_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x float> %va, <vscale x 1 x float> %vb
-  ret <vscale x 1 x float> %vc
-}
-
-define <vscale x 1 x float> @vfmerge_fv_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
-  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x float> %splat, <vscale x 1 x float> %va
-  ret <vscale x 1 x float> %vc
-}
-
-define <vscale x 2 x float> @vfmerge_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x float> %va, <vscale x 2 x float> %vb
-  ret <vscale x 2 x float> %vc
-}
-
-define <vscale x 2 x float> @vfmerge_fv_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
-  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x float> %splat, <vscale x 2 x float> %va
-  ret <vscale x 2 x float> %vc
-}
-
-define <vscale x 4 x float> @vfmerge_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x float> %va, <vscale x 4 x float> %vb
-  ret <vscale x 4 x float> %vc
-}
-
-define <vscale x 4 x float> @vfmerge_fv_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x float> %splat, <vscale x 4 x float> %va
-  ret <vscale x 4 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %va, <vscale x 8 x float> %vb
-  ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_fv_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %splat, <vscale x 8 x float> %va
-  ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 8 x float> @vfmerge_zv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %splat, <vscale x 8 x float> %va
-  ret <vscale x 8 x float> %vc
-}
-
-define <vscale x 16 x float> @vfmerge_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv16f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x float> %va, <vscale x 16 x float> %vb
-  ret <vscale x 16 x float> %vc
-}
-
-define <vscale x 16 x float> @vfmerge_fv_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv16f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
-  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x float> %splat, <vscale x 16 x float> %va
-  ret <vscale x 16 x float> %vc
-}
-
-define <vscale x 1 x double> @vfmerge_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv1f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x double> %va, <vscale x 1 x double> %vb
-  ret <vscale x 1 x double> %vc
-}
-
-define <vscale x 1 x double> @vfmerge_fv_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv1f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
-  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x double> %splat, <vscale x 1 x double> %va
-  ret <vscale x 1 x double> %vc
-}
-
-define <vscale x 2 x double> @vfmerge_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x double> %va, <vscale x 2 x double> %vb
-  ret <vscale x 2 x double> %vc
-}
-
-define <vscale x 2 x double> @vfmerge_fv_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
-  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x double> %splat, <vscale x 2 x double> %va
-  ret <vscale x 2 x double> %vc
-}
-
-define <vscale x 4 x double> @vfmerge_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x double> %va, <vscale x 4 x double> %vb
-  ret <vscale x 4 x double> %vc
-}
-
-define <vscale x 4 x double> @vfmerge_fv_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
-  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x double> %splat, <vscale x 4 x double> %va
-  ret <vscale x 4 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_vv_nxv8f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %va, <vscale x 8 x double> %vb
-  ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_fv_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_fv_nxv8f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
-  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
-  ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 8 x double> @vfmerge_zv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vfmerge_zv_nxv8f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
-  ret <vscale x 8 x double> %vc
-}
-
-define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
-; CHECK-LABEL: vselect_combine_regression:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vl8re64.v v8, (a1)
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    vl8re64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmseq.vi v24, v16, 0
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmseq.vi v0, v16, 0
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
-  %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
-  ret <vscale x 16 x double> %sel
-}
-
-define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16 x i1> %ma, <vscale x 16 x i1> %mb, <vscale x 16 x double>* %out) {
-; CHECK-LABEL: vselect_legalize_regression:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a0)
-; CHECK-NEXT:    vmand.mm v1, v0, v24
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a2, a0, 3
-; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v1, a2
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmv.v.i v24, 0
-; CHECK-NEXT:    vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT:    vmv1r.v v0, v1
-; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    ret
-  %cond = and <vscale x 16 x i1> %ma, %mb
-  %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %a, <vscale x 16 x double> zeroinitializer
-  store <vscale x 16 x double> %sel, <vscale x 16 x double>* %out
-  ret void
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
similarity index 99%
rename from llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 54682aaf2cae8..24426b5b5ce5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 1 x half> @vfmerge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vfmerge_vv_nxv1f16:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
deleted file mode 100644
index a3a61b29b6608..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
+++ /dev/null
@@ -1,827 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_xv_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 1 x i8> @vmerge_iv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
-  ret <vscale x 1 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_xv_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 2 x i8> @vmerge_iv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
-  ret <vscale x 2 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv3i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %va, <vscale x 3 x i8> %vb
-  ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_xv_nxv3i8(<vscale x 3 x i8> %va, i8 signext %b, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv3i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
-  ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 3 x i8> @vmerge_iv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv3i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
-  ret <vscale x 3 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
-  ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_xv_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %splat, <vscale x 4 x i8> %va
-  ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 4 x i8> @vmerge_iv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %splat, <vscale x 4 x i8> %va
-  ret <vscale x 4 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
-  ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %splat, <vscale x 8 x i8> %va
-  ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 8 x i8> @vmerge_iv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %splat, <vscale x 8 x i8> %va
-  ret <vscale x 8 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
-  ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_xv_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %splat, <vscale x 16 x i8> %va
-  ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 16 x i8> @vmerge_iv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %splat, <vscale x 16 x i8> %va
-  ret <vscale x 16 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
-  ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_xv_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %splat, <vscale x 32 x i8> %va
-  ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 32 x i8> @vmerge_iv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %splat, <vscale x 32 x i8> %va
-  ret <vscale x 32 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv64i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
-  ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_xv_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv64i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %splat, <vscale x 64 x i8> %va
-  ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 64 x i8> @vmerge_iv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv64i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %splat, <vscale x 64 x i8> %va
-  ret <vscale x 64 x i8> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
-  ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_xv_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %splat, <vscale x 1 x i16> %va
-  ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 1 x i16> @vmerge_iv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %splat, <vscale x 1 x i16> %va
-  ret <vscale x 1 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
-  ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_xv_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %splat, <vscale x 2 x i16> %va
-  ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 2 x i16> @vmerge_iv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %splat, <vscale x 2 x i16> %va
-  ret <vscale x 2 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
-  ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_xv_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %splat, <vscale x 4 x i16> %va
-  ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 4 x i16> @vmerge_iv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %splat, <vscale x 4 x i16> %va
-  ret <vscale x 4 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
-  ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %splat, <vscale x 8 x i16> %va
-  ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 8 x i16> @vmerge_iv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %splat, <vscale x 8 x i16> %va
-  ret <vscale x 8 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
-  ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_xv_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %splat, <vscale x 16 x i16> %va
-  ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 16 x i16> @vmerge_iv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %splat, <vscale x 16 x i16> %va
-  ret <vscale x 16 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv32i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
-  ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_xv_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv32i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %splat, <vscale x 32 x i16> %va
-  ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 32 x i16> @vmerge_iv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv32i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %splat, <vscale x 32 x i16> %va
-  ret <vscale x 32 x i16> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
-  ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_xv_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %splat, <vscale x 1 x i32> %va
-  ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 1 x i32> @vmerge_iv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %splat, <vscale x 1 x i32> %va
-  ret <vscale x 1 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
-  ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_xv_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %splat, <vscale x 2 x i32> %va
-  ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 2 x i32> @vmerge_iv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %splat, <vscale x 2 x i32> %va
-  ret <vscale x 2 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
-  ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_xv_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %splat, <vscale x 4 x i32> %va
-  ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 4 x i32> @vmerge_iv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %splat, <vscale x 4 x i32> %va
-  ret <vscale x 4 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
-  ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %va
-  ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 8 x i32> @vmerge_iv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %va
-  ret <vscale x 8 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv16i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
-  ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_xv_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv16i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %splat, <vscale x 16 x i32> %va
-  ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 16 x i32> @vmerge_iv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv16i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %splat, <vscale x 16 x i32> %va
-  ret <vscale x 16 x i32> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
-  ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
-  ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 1 x i64> @vmerge_iv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
-  ret <vscale x 1 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
-  ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
-  ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 2 x i64> @vmerge_iv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
-  ret <vscale x 2 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
-  ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
-  ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 4 x i64> @vmerge_iv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
-  ret <vscale x 4 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_vv_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
-  ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
-  ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_iv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_iv_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
-; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
-  ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
-; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
-  ret <vscale x 8 x i64> %vc
-}
-
-define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
-; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-  %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
-  ret <vscale x 8 x i64> %vc
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
similarity index 93%
rename from llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index bc1138617caef..19c7d599cb068 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -1,5 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <vscale x 1 x i8> @vmerge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %cond) {
 ; CHECK-LABEL: vmerge_vv_nxv1i8:
@@ -658,11 +661,23 @@ define <vscale x 1 x i64> @vmerge_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 }
 
 define <vscale x 1 x i64> @vmerge_xv_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
@@ -692,11 +707,23 @@ define <vscale x 2 x i64> @vmerge_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 }
 
 define <vscale x 2 x i64> @vmerge_xv_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
@@ -726,11 +753,23 @@ define <vscale x 4 x i64> @vmerge_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 }
 
 define <vscale x 4 x i64> @vmerge_xv_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
   %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
@@ -760,11 +799,23 @@ define <vscale x 8 x i64> @vmerge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 }
 
 define <vscale x 8 x i64> @vmerge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %cond) {
-; CHECK-LABEL: vmerge_xv_nxv8i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmerge_xv_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmerge_xv_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
deleted file mode 100644
index 59407e81b42bb..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ /dev/null
@@ -1,2800 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i8,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i16,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i8 9,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 9,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i16 9,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
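
The merged file below drives both targets from a single source: its RUN lines pipe the test through sed, rewriting the placeholder type iXLen to i32 for the riscv32 invocation and to i64 for the riscv64 invocation before llc ever sees the IR. As an illustration only (this is just the shown sed command applied to the shown lines, with no new intrinsics or flags assumed), the riscv64 substitution turns the first declaration into:

declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);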

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vxor.ll
index b83f2abbcc174..3ec594d68a46c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1043,9 +1045,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1056,7 +1058,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1066,10 +1068,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1081,7 +1083,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1090,9 +1092,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1103,7 +1105,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1113,10 +1115,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1128,7 +1130,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1137,9 +1139,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1150,7 +1152,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1160,10 +1162,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1175,7 +1177,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1184,9 +1186,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1197,7 +1199,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1207,10 +1209,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1222,7 +1224,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1231,9 +1233,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1254,10 +1256,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1269,7 +1271,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1278,9 +1280,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1291,7 +1293,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1301,10 +1303,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1316,7 +1318,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1325,9 +1327,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1338,7 +1340,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1348,10 +1350,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 64 x i8> %1,
     i8 %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1372,9 +1374,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1385,7 +1387,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1395,10 +1397,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1410,7 +1412,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1419,9 +1421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1432,7 +1434,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1442,10 +1444,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1457,7 +1459,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1466,9 +1468,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1479,7 +1481,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1489,10 +1491,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1504,7 +1506,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1513,9 +1515,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1526,7 +1528,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1536,10 +1538,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1551,7 +1553,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1560,9 +1562,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1573,7 +1575,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1583,10 +1585,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1598,7 +1600,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1607,9 +1609,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1620,7 +1622,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1630,10 +1632,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1645,7 +1647,7 @@ entry:
     <vscale x 32 x i16> %1,
     i16 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1654,9 +1656,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1677,10 +1679,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1692,7 +1694,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1701,9 +1703,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1714,7 +1716,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1724,10 +1726,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1739,7 +1741,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1748,9 +1750,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1761,7 +1763,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1771,10 +1773,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1795,9 +1797,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1808,7 +1810,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1818,10 +1820,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1833,7 +1835,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1842,9 +1844,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1855,7 +1857,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1865,10 +1867,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1880,7 +1882,7 @@ entry:
     <vscale x 16 x i32> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1889,26 +1891,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vxor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1918,28 +1926,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v9, v10, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vxor.vx v8, v9, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1948,26 +1962,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT:    vxor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1977,28 +1997,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v10, v12, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT:    vxor.vx v8, v10, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2007,26 +2033,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v8, v12
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT:    vxor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2036,28 +2068,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v12, v16, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT:    vxor.vx v8, v12, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2066,26 +2104,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vxor.vx v8, v8, a0
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2095,33 +2139,39 @@ declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sw a1, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v24, (a0), zero
-; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vxor.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vxor.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2132,12 +2182,12 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2149,12 +2199,12 @@ entry:
     <vscale x 1 x i8> %1,
     i8 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2165,12 +2215,12 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2182,12 +2232,12 @@ entry:
     <vscale x 2 x i8> %1,
     i8 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2198,12 +2248,12 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2215,12 +2265,12 @@ entry:
     <vscale x 4 x i8> %1,
     i8 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2231,12 +2281,12 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2248,12 +2298,12 @@ entry:
     <vscale x 8 x i8> %1,
     i8 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2264,12 +2314,12 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2281,12 +2331,12 @@ entry:
     <vscale x 16 x i8> %1,
     i8 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2297,12 +2347,12 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2314,12 +2364,12 @@ entry:
     <vscale x 32 x i8> %1,
     i8 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2330,12 +2380,12 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     i8 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2347,12 +2397,12 @@ entry:
     <vscale x 64 x i8> %1,
     i8 9,
     <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2363,12 +2413,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2380,12 +2430,12 @@ entry:
     <vscale x 1 x i16> %1,
     i16 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2396,12 +2446,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2413,12 +2463,12 @@ entry:
     <vscale x 2 x i16> %1,
     i16 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2429,12 +2479,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2446,12 +2496,12 @@ entry:
     <vscale x 4 x i16> %1,
     i16 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2462,12 +2512,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2479,12 +2529,12 @@ entry:
     <vscale x 8 x i16> %1,
     i16 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2495,12 +2545,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2512,12 +2562,12 @@ entry:
     <vscale x 16 x i16> %1,
     i16 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2528,12 +2578,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i16 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2545,12 +2595,12 @@ entry:
     <vscale x 32 x i16> %1,
     i16 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2561,12 +2611,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2578,12 +2628,12 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2594,12 +2644,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2611,12 +2661,12 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2627,12 +2677,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2644,12 +2694,12 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2660,12 +2710,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2677,12 +2727,12 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2693,12 +2743,12 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i32 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2710,12 +2760,12 @@ entry:
     <vscale x 16 x i32> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2726,12 +2776,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2743,12 +2793,12 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2759,12 +2809,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2776,12 +2826,12 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2792,12 +2842,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2809,12 +2859,12 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2825,12 +2875,12 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i64 9,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2842,7 +2892,7 @@ entry:
     <vscale x 8 x i64> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
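
A note on the pattern above: the merged test now takes its VL and policy operands as iXLen rather than a hard-coded i32, which is what lets one test body serve both rv32 and rv64. The RUN lines fall outside this excerpt, so the exact invocation below is an assumption based on the usual convention for these merged RVV tests, a minimal sketch rather than the committed header:

    ; Hypothetical RUN lines (not shown in this excerpt): substitute iXLen
    ; with the target's native XLEN type, then compile once per target and
    ; check both outputs against the same CHECK lines.
    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s

Under that substitution, a masked call such as the final one above passes iXLen %3 as the VL operand and iXLen 1 as the trailing policy immediate; policy value 1 (tail agnostic) is consistent with the "ta, mu" settings in the masked CHECK lines, while the unmasked tests, which take only a VL operand and an undef passthru, select "ta, ma".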

More information about the llvm-commits mailing list