[llvm] 0fd212c - [RISCV] Merge rv32/rv64 vector widening intrinsic tests that have the same content. NFC.

Jim Lin via llvm-commits <llvm-commits@lists.llvm.org>
Tue Jul 11 01:25:57 PDT 2023


Author: Jim Lin
Date: 2023-07-11T16:25:12+08:00
New Revision: 0fd212c91d2be638ae25798c1e0b10cf6910cc55

URL: https://github.com/llvm/llvm-project/commit/0fd212c91d2be638ae25798c1e0b10cf6910cc55
DIFF: https://github.com/llvm/llvm-project/commit/0fd212c91d2be638ae25798c1e0b10cf6910cc55.diff

LOG: [RISCV] Merge rv32/rv64 vector widening intrinsic tests that have the same content. NFC.
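
Each merged file keeps a single copy of the test content, parameterized on
the XLen-sized scalar type through an iXLen placeholder; the RUN lines
substitute the concrete type with sed before invoking llc, so one file
exercises both riscv32 and riscv64. From the new vwadd.ll header in the
diff below:

    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s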

Added: 
    llvm/test/CodeGen/RISCV/rvv/vwadd.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
deleted file mode 100644
index 8429f235b9442d..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwadd.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwadd.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwadd.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwadd.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwadd.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwadd.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwadd.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwadd.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwadd.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwadd.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwadd.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwadd.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwadd.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwadd.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwadd.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwadd.ll
index 716a18f5e1d1eb..653a84e8186f06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
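
For context on the merged files: iXLen is not an LLVM IR type but a textual
placeholder. The merged tests run the same body for both targets by letting
sed rewrite iXLen before llc sees the file. A representative pair of RUN
lines, assuming the sed-substitution pattern used by the other merged rvv
intrinsic tests:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

This keeps a single FileCheck body valid for both rv32 and rv64, since the
generated vector code is identical and only the XLEN-sized scalar type of the
vl operand differs between the two targets.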

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
deleted file mode 100644
index 502e5a9de2d473..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ /dev/null
@@ -1,2163 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwadd.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwadd.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwadd.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwadd.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwadd.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

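The merged files all use the same mechanism, visible in the vwadd.w.ll rename below: each test is written once against a placeholder type spelled iXLen, and the RUN lines pipe the file through sed to instantiate iXLen as i32 for the riscv32 runs and i64 for the riscv64 runs before invoking llc. A minimal sketch of the pattern follows, abridged from the new RUN lines in the diff; the intrinsic shown is one of the merged vwadd.w tests, and the function name @example is illustrative only:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen);

define <vscale x 2 x i16> @example(<vscale x 2 x i16> %w, <vscale x 2 x i8> %v, iXLen %vl) nounwind {
  ; sed rewrites every iXLen to the target's native XLEN integer type,
  ; so one file exercises both the rv32 and rv64 configurations and the
  ; previously duplicated -rv32.ll/-rv64.ll copies can be removed.
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef, <vscale x 2 x i16> %w, <vscale x 2 x i8> %v, iXLen %vl)
  ret <vscale x 2 x i16> %a
}

Since the generated CHECK lines are identical on both targets, a single FileCheck prefix suffices; the VL operand (and the mask-variant policy operand) are the only places the XLEN-dependent type appears, which is what made the rv32 and rv64 copies textually identical apart from i32 vs. i64.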
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll
index 137979732cb76e..44c925f07f277e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll
@@ -1,13 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -early-live-intervals < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +24,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -28,10 +34,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +49,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,9 +58,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +71,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -75,10 +81,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +96,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,9 +105,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +118,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -122,10 +128,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +143,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,9 +152,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -169,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +190,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +212,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -216,10 +222,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +237,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,9 +246,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +259,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -263,10 +269,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4r.v v24, (a0)
@@ -279,7 +285,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -288,9 +294,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -301,7 +307,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,10 +317,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -326,7 +332,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -335,9 +341,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -348,7 +354,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,10 +364,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -373,7 +379,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -382,9 +388,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -395,7 +401,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,10 +411,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -420,7 +426,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -429,9 +435,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -442,7 +448,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,10 +458,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -467,7 +473,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -476,9 +482,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -489,7 +495,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,10 +505,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -515,7 +521,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -524,9 +530,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -537,7 +543,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -547,10 +553,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -562,7 +568,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -571,9 +577,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -584,7 +590,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -594,10 +600,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -609,7 +615,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -618,9 +624,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -631,7 +637,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -641,10 +647,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -656,7 +662,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -665,9 +671,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -678,7 +684,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -688,10 +694,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -704,7 +710,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -713,9 +719,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -726,7 +732,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -736,10 +742,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -751,7 +757,7 @@ entry:
     <vscale x 1 x i16> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -760,9 +766,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -773,7 +779,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -783,10 +789,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -798,7 +804,7 @@ entry:
     <vscale x 2 x i16> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -807,9 +813,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -820,7 +826,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -830,10 +836,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -845,7 +851,7 @@ entry:
     <vscale x 4 x i16> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -854,9 +860,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -867,7 +873,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -877,10 +883,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -892,7 +898,7 @@ entry:
     <vscale x 8 x i16> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -901,9 +907,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -914,7 +920,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -924,10 +930,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -939,7 +945,7 @@ entry:
     <vscale x 16 x i16> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,9 +954,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -961,7 +967,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -971,10 +977,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -986,7 +992,7 @@ entry:
     <vscale x 32 x i16> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -995,9 +1001,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1008,7 +1014,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1018,10 +1024,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1033,7 +1039,7 @@ entry:
     <vscale x 1 x i32> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1042,9 +1048,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1055,7 +1061,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1065,10 +1071,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1080,7 +1086,7 @@ entry:
     <vscale x 2 x i32> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1089,9 +1095,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1102,7 +1108,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1112,10 +1118,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1127,7 +1133,7 @@ entry:
     <vscale x 4 x i32> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1136,9 +1142,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1149,7 +1155,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1159,10 +1165,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1174,7 +1180,7 @@ entry:
     <vscale x 8 x i32> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1183,9 +1189,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1196,7 +1202,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1206,10 +1212,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1221,7 +1227,7 @@ entry:
     <vscale x 16 x i32> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1230,9 +1236,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1243,7 +1249,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1253,10 +1259,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1268,7 +1274,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,9 +1283,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1290,7 +1296,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1300,10 +1306,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1315,7 +1321,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1324,9 +1330,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1337,7 +1343,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1347,10 +1353,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1362,7 +1368,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1371,9 +1377,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1384,7 +1390,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1394,10 +1400,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1409,12 +1415,12 @@ entry:
     <vscale x 8 x i64> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1426,12 +1432,12 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1443,12 +1449,12 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1460,12 +1466,12 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1477,12 +1483,12 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1494,12 +1500,12 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1511,12 +1517,12 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1528,12 +1534,12 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1545,12 +1551,12 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1562,12 +1568,12 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1579,12 +1585,12 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1596,12 +1602,12 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1613,12 +1619,12 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1630,12 +1636,12 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1647,12 +1653,12 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1664,12 +1670,12 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1681,12 +1687,12 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1698,12 +1704,12 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,12 +1721,12 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1732,12 +1738,12 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1749,12 +1755,12 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,12 +1772,12 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1783,12 +1789,12 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1800,12 +1806,12 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1817,12 +1823,12 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1834,12 +1840,12 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1851,12 +1857,12 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1868,12 +1874,12 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1885,12 +1891,12 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1902,12 +1908,12 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1919,12 +1925,12 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1936,12 +1942,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1953,12 +1959,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1970,12 +1976,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1987,12 +1993,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2004,12 +2010,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2021,12 +2027,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2038,12 +2044,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2055,12 +2061,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2072,12 +2078,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2089,12 +2095,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2106,12 +2112,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2123,12 +2129,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2140,12 +2146,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2157,7 +2163,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }

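(The hunks above show the only substantive change in the merged files: each XLEN-sized scalar operand, i.e. the vl and policy arguments typed i32 on rv32 and i64 on rv64, becomes the placeholder iXLen, so a single test body serves both targets. The RUN lines of the new files are not part of this excerpt; the sketch below assumes the sed-substitution pattern commonly used for merged rv32/rv64 tests under llvm/test/CodeGen/RISCV/rvv, and the function name @sketch_vwadd_w is illustrative only.)

; A minimal sketch, assuming sed-based RUN lines: iXLen is not a real IR
; type, so sed rewrites it to the target's native width before llc ever
; parses the file.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i16> @sketch_vwadd_w(<vscale x 1 x i16> %w, <vscale x 1 x i8> %v, iXLen %vl) nounwind {
entry:
  ; The vl operand is iXLen, matching the replacements in the hunks above.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %w,
    <vscale x 1 x i8> %v,
    iXLen %vl)
  ret <vscale x 1 x i16> %a
}

Under that scheme both sed pipelines produce identical assembly and identical FileCheck output, which is why the rv32 and rv64 copies could be folded into one file as an NFC change.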
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
deleted file mode 100644
index 4e1412b401cccf..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

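The merged form of these tests works by never hard-coding the XLEN-sized
scalar type: every i32/i64 that tracked XLEN is spelled as the placeholder
iXLen, and the RUN lines pipe the file through sed to rewrite iXLen into
i32 or i64 before llc parses it, so a single test body drives both riscv32
and riscv64. A minimal sketch of the pattern (the @example wrapper name is
illustrative; the intrinsic, declare, and RUN lines mirror the merged file
shown in the diff below):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 1 x i16> @example(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
entry:
  ; After sed runs, iXLen is a concrete integer type and this is ordinary
  ; IR; one set of CHECK lines suffices because the vector instructions
  ; emitted are identical on both targets.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i16> %a
}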
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwaddu.ll
index c04b92b7e5e01e..2c3943958d08c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

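(Context for the hunks above: the merged test replaces the XLEN-sized VL and policy operands with the placeholder type iXLen so one file serves both targets. The RUN lines of the merged vwaddu.ll are not shown in this diff; a minimal sketch, assuming the usual sed-based convention for merged rv32/rv64 rvv tests, looks like:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Each RUN line rewrites iXLen to the concrete GPR width before llc parses the file, which is why the shared CHECK lines can stay identical for both triples.)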
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
deleted file mode 100644
index a3df148ae0ad5b..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ /dev/null
@@ -1,2163 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwaddu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwaddu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwaddu.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

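The rename below illustrates the merge pattern used throughout this commit:
the XLEN-dependent scalar type is spelled iXLen in the checked-in file, and
each RUN line pipes the test through sed to materialize i32 or i64 before
llc parses it, so a single file covers both riscv32 and riscv64. Because the
generated vector code was already identical for both targets (only the VL
scalar type differs), one set of CHECK lines suffices, which is why the merge
is NFC. A minimal sketch of the pattern, condensed from the first test in the
diff that follows (the function name @sketch_vwaddu_w_wv is illustrative, not
part of the commit):

; The on-disk file is a template, not directly parseable IR: sed rewrites
; iXLen to the target's XLEN type before llc ever sees it.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i16> @sketch_vwaddu_w_wv(<vscale x 1 x i16> %w, <vscale x 1 x i8> %v, iXLen %vl) nounwind {
; CHECK-LABEL: sketch_vwaddu_w_wv:
; CHECK:    vwaddu.wv
entry:
  ; Widening add of a wide source and a narrow source; the undef passthru
  ; marks the unmasked, tail-agnostic form.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %w,
    <vscale x 1 x i8> %v,
    iXLen %vl)
  ret <vscale x 1 x i16> %a
}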
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll
index d454cc63f91445..59a665dc76cf78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4r.v v24, (a0)
@@ -279,7 +281,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -288,9 +290,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,10 +313,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -326,7 +328,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -515,7 +517,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -524,9 +526,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -537,7 +539,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -547,10 +549,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -562,7 +564,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -571,9 +573,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -584,7 +586,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -594,10 +596,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -609,7 +611,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -704,7 +706,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -713,9 +715,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -726,7 +728,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -736,10 +738,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -751,7 +753,7 @@ entry:
     <vscale x 1 x i16> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -760,9 +762,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -773,7 +775,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -783,10 +785,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -798,7 +800,7 @@ entry:
     <vscale x 2 x i16> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -807,9 +809,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -820,7 +822,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -830,10 +832,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -845,7 +847,7 @@ entry:
     <vscale x 4 x i16> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 8 x i16> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 16 x i16> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 32 x i16> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1033,7 +1035,7 @@ entry:
     <vscale x 1 x i32> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1042,9 +1044,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1055,7 +1057,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1065,10 +1067,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1080,7 +1082,7 @@ entry:
     <vscale x 2 x i32> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1089,9 +1091,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1102,7 +1104,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1112,10 +1114,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1127,7 +1129,7 @@ entry:
     <vscale x 4 x i32> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1136,9 +1138,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1149,7 +1151,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1159,10 +1161,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1174,7 +1176,7 @@ entry:
     <vscale x 8 x i32> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1183,9 +1185,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1206,10 +1208,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1221,7 +1223,7 @@ entry:
     <vscale x 16 x i32> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1230,9 +1232,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1243,7 +1245,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1253,10 +1255,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1268,7 +1270,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,9 +1279,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1290,7 +1292,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1300,10 +1302,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1324,9 +1326,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1337,7 +1339,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1347,10 +1349,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1362,7 +1364,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1371,9 +1373,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1384,7 +1386,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1394,10 +1396,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1409,12 +1411,12 @@ entry:
     <vscale x 8 x i64> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1426,12 +1428,12 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1443,12 +1445,12 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1460,12 +1462,12 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1477,12 +1479,12 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1494,12 +1496,12 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1511,12 +1513,12 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1528,12 +1530,12 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1545,12 +1547,12 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1562,12 +1564,12 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1579,12 +1581,12 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1596,12 +1598,12 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1613,12 +1615,12 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1630,12 +1632,12 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1647,12 +1649,12 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1664,12 +1666,12 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1681,12 +1683,12 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1698,12 +1700,12 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,12 +1717,12 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1732,12 +1734,12 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1749,12 +1751,12 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,12 +1768,12 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1783,12 +1785,12 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1800,12 +1802,12 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1817,12 +1819,12 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1834,12 +1836,12 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1851,12 +1853,12 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1868,12 +1870,12 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1885,12 +1887,12 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1902,12 +1904,12 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1919,12 +1921,12 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1936,12 +1938,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1953,12 +1955,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1970,12 +1972,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1987,12 +1989,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2004,12 +2006,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2021,12 +2023,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2038,12 +2040,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2055,12 +2057,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2072,12 +2074,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2089,12 +2091,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2106,12 +2108,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2123,12 +2125,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2140,12 +2142,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2157,7 +2159,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }

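(Context for the mechanical change above: the merged test replaces every XLEN-sized scalar operand of the intrinsics, i.e. the VL and policy arguments, with the placeholder type iXLen, which the RUN lines expand to i32 or i64 before llc sees the file. A minimal sketch of that idiom, assuming the merged vwaddu.w.ll uses the same sed-based RUN lines as other merged rvv tests; the actual RUN lines are not visible in this hunk:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Because the CHECK lines only match vector instructions whose output is independent of XLEN, a single set of FileCheck assertions covers both targets, which is what makes the rv32/rv64 merge an NFC change.)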
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
deleted file mode 100644
index 59de264da5f9b0..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
+++ /dev/null
@@ -1,1412 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32, i32);
-
-define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32, i32);
-
-define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32, i32);
-
-define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32, i32);
-
-define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmacc.ll
index 7e65c4582a55e7..f710603355192f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -748,7 +750,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -795,7 +797,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -842,7 +844,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64, i64);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

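(For reference: the i64 to iXLen rewrites in the hunks above only work because the merged test files are presumably driven by paired sed-based RUN lines that substitute iXLen with the concrete XLEN type before invoking llc. A minimal sketch of that pattern follows; the exact -mattr flags may differ per file.)

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s

(One file then covers both rv32 and rv64: each RUN line rewrites the iXLen VL and policy operands back to i32 or i64 on the fly, and the autogenerated CHECK lines stay shared.)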
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
deleted file mode 100644
index aaf47d2b2d23a9..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
+++ /dev/null
@@ -1,1412 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
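
Note on the rename below: the merged file spells every XLEN-sized scalar as
the placeholder type iXLen, and each RUN line rewrites that placeholder with
sed before handing the file to llc, so a single test body drives both
riscv32 and riscv64. A minimal sketch of the pattern (a hypothetical test
with illustrative names @example, %acc, %a, %b and %vl, not part of this
commit):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)

define <vscale x 1 x i16> @example(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) nounwind {
; CHECK-LABEL: example:
; CHECK:       vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:  vwmaccsu.vv v8, v9, v10
entry:
  ; vl comes from %vl; the trailing iXLen 0 is the policy operand
  ; (0 = tail undisturbed, matching the "tu" in the vsetvli above).
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %acc,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i8> %b,
    iXLen %vl, iXLen 0)
  ret <vscale x 1 x i16> %r
}

With this in place, the hunks that follow are mechanical i32 -> iXLen
rewrites of the RUN lines and the declare/define signatures; the CHECK
blocks are unchanged because both targets emit identical vector code.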

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll
index 12d8088f2c5583..9c253c036710bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -748,7 +750,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -795,7 +797,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -842,7 +844,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
deleted file mode 100644
index 81b584086eeab6..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ /dev/null
@@ -1,1412 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}

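Note on the rename that follows: the merged test relies on textual substitution
instead of per-XLEN file duplication. Each RUN line pipes the source through
sed to rewrite the iXLen placeholder into the target's native integer type
before llc consumes it. A minimal sketch of that expansion, assuming the
merged file is saved locally as vwmaccu.ll (the local file name is illustrative,
not part of the commit):

  # Rewrite iXLen to each target's XLEN type, then compile; both
  # pipelines are checked against the same autogenerated CHECK lines.
  sed 's/iXLen/i32/g' vwmaccu.ll | llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs
  sed 's/iXLen/i64/g' vwmaccu.ll | llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs
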
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll
index f746854b8527e5..3cb6b626e36cb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -710,10 +712,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -724,7 +726,7 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -734,9 +736,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -748,7 +750,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -757,10 +759,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -771,7 +773,7 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -781,9 +783,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -795,7 +797,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -804,10 +806,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -818,7 +820,7 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -828,9 +830,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -842,7 +844,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -851,10 +853,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -865,7 +867,7 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -875,9 +877,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -889,7 +891,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -898,10 +900,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -912,7 +914,7 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -922,9 +924,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -936,7 +938,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -945,10 +947,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -959,7 +961,7 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -969,9 +971,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -983,7 +985,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -992,10 +994,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -1006,7 +1008,7 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1016,9 +1018,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -1030,7 +1032,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1039,10 +1041,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -1053,7 +1055,7 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1063,9 +1065,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -1077,7 +1079,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,10 +1088,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1110,9 +1112,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -1124,7 +1126,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -1147,7 +1149,7 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -1171,7 +1173,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1180,10 +1182,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -1194,7 +1196,7 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1204,9 +1206,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -1218,7 +1220,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1227,10 +1229,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -1241,7 +1243,7 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1251,9 +1253,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -1265,7 +1267,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1274,10 +1276,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -1288,7 +1290,7 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1298,9 +1300,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -1312,7 +1314,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1321,10 +1323,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -1335,7 +1337,7 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1345,9 +1347,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -1359,7 +1361,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1368,10 +1370,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -1382,7 +1384,7 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1392,9 +1394,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -1406,7 +1408,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

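For reference, every merged file in this patch follows the scheme visible in the vwmaccu.ll hunks above: the scalar operands whose width tracks XLEN (the VL and the trailing policy operand) are spelled with the placeholder type iXLen, and the RUN lines rewrite that placeholder with sed to i32 or i64 before llc parses the IR, so a single source file drives both riscv32 and riscv64. A minimal self-contained sketch of the scheme, for illustration only (it reuses the existing llvm.riscv.vadd intrinsic, which this patch does not touch, and the function name sketch_vadd is made up):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,   ; passthru (undef selects the tail-agnostic policy)
  <vscale x 1 x i8>,   ; lhs
  <vscale x 1 x i8>,   ; rhs
  iXLen)               ; vl; sed turns this into i32 or i64

define <vscale x 1 x i8> @sketch_vadd(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: sketch_vadd:
; CHECK: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK: vadd.vv v8, v8, v9
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}
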
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
deleted file mode 100644
index b836f334755731..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ /dev/null
@@ -1,707 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64, i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64, i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    i64 %3, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64, i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 0)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll
index 7bdf919c835c44..48df2f93352485 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16>  @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,9 +31,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
@@ -43,7 +45,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,10 +54,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16>  @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
@@ -66,7 +68,7 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -76,9 +78,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
@@ -90,7 +92,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,10 +101,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16>  @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
@@ -113,7 +115,7 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -123,9 +125,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
@@ -137,7 +139,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,10 +148,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16>  @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -170,9 +172,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
@@ -184,7 +186,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,10 +195,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16>  @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
@@ -207,7 +209,7 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -217,9 +219,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
@@ -231,7 +233,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,10 +242,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16>  @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
@@ -254,7 +256,7 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i8> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -264,9 +266,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
@@ -278,7 +280,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -287,10 +289,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32>  @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,9 +313,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -325,7 +327,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32>  @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,9 +360,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -372,7 +374,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -381,10 +383,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32>  @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,9 +407,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -419,7 +421,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -428,10 +430,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32>  @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,9 +454,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
@@ -466,7 +468,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -475,10 +477,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32>  @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i16> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,9 +501,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
@@ -513,7 +515,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -522,10 +524,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64>  @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -546,9 +548,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
@@ -560,7 +562,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -569,10 +571,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64>  @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -593,9 +595,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
@@ -607,7 +609,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -616,10 +618,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64>  @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
@@ -630,7 +632,7 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -640,9 +642,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
@@ -654,7 +656,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -663,10 +665,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64>  @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
@@ -677,7 +679,7 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i32> %2,
-    i32 %3, i32 0)
+    iXLen %3, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32, i32);
+  iXLen, iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
@@ -701,7 +703,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 0)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x i64> %a
 }

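The iXLen placeholder in the merged file above is purely textual: each RUN line pipes the test through sed to rewrite iXLen into the target's native integer width before llc parses the IR, so a single file drives both the riscv32 and riscv64 runs whenever the expected codegen is identical. A minimal sketch of the pattern, using a hypothetical @llvm.riscv.foo intrinsic as a stand-in for the real ones:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.foo(
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i8>,
  iXLen, iXLen)

Because FileCheck only ever sees the already-substituted input, no XLEN-specific check prefixes are needed; tests whose output differs between rv32 and rv64 cannot be merged this way and stay split per target.
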
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
deleted file mode 100644
index 6b0c423d17576d..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwmul.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwmul.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwmul.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwmul.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwmul.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwmul.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwmul.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwmul.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwmul.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwmul.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwmul.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwmul.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwmul.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmul.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwmul.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwmul.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwmul.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmul.ll
index eab3347a106322..7bf7f6b6d31439 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

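For context on the hunks above: after the merge, the XLEN-dependent VL and policy operands are spelled with the `iXLen` placeholder, and the RUN lines are expected to substitute a concrete integer type with sed before invoking llc, so a single test body exercises both riscv32 and riscv64. A minimal sketch of the presumed RUN-line header of the merged vwmul.ll (the exact flags and substitution are an assumption, not copied from the commit):

; NOTE: sed replaces the iXLen placeholder with the target's native XLEN type
; NOTE: before the IR ever reaches llc.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Because the generated assembly is identical on both targets, one set of CHECK lines suffices, which is what lets each rv32/rv64 file pair collapse into a single test.
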
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
deleted file mode 100644
index 246a7c9842023f..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

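The rename below shows the mechanism used for every merge in this commit: the shared test spells the XLEN-dependent type as the placeholder iXLen, and each RUN line substitutes a concrete integer type with sed before the file reaches llc, so a single copy of the test drives both riscv32 and riscv64. As a minimal sketch of the pattern (adapted from the vwmulsu declarations that follow; not an extra test added by the commit):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; Only the VL operand (and, in masked variants, the policy operand) is
; XLEN-sized, so only those are written as iXLen; element types such as
; i8 and i16 stay fixed.
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

Under sed 's/iXLen/i64/g' the trailing operand becomes i64, reproducing the declaration from the deleted -rv64.ll copy; under i32 it reproduces the -rv32.ll one, which is why the CHECK lines can be shared unchanged.
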
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll
index 4853b538290526..4322f66d198dee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
deleted file mode 100644
index 3723806548eee7..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwmulu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwmulu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwmulu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

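The rename below shows the merge mechanism used throughout this commit: the unified test spells every XLEN-dependent VL and policy operand as the placeholder token iXLen, and two RUN lines substitute it with sed before piping the file to llc, so a single test body drives both the riscv32 and riscv64 configurations and both runs share the same CHECK lines. A minimal sketch of the pattern, reusing a declaration from the merged file (the standalone shell command is illustrative; in the actual test, lit expands %s and appends -verify-machineinstrs and FileCheck as shown in the RUN lines below):

  $ sed 's/iXLen/i64/g' vwmulu.ll | llc -mtriple=riscv64 -mattr=+v ...
  ; after substitution, the rv64 run sees concrete i64 operands:
  declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16>,
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    i64);

The rv32 RUN line performs the same substitution with i32, which is why this merge is only possible for test files whose generated assembly is identical on both targets.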
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwmulu.ll
index 8255d3bb132f24..e56ebe8df44d91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

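(A note on the i32 -> iXLen rewrite shown in the hunks above: every VL and
mask-policy operand of the vwmulu intrinsics is changed to the iXLen
placeholder so one test body can serve both targets. The RUN lines of the
merged llvm/test/CodeGen/RISCV/rvv/vwmulu.ll fall outside this part of the
diff; as a sketch of how such merged rvv tests are typically driven, with the
exact sed invocation and flags below assumed rather than quoted from this
commit, the placeholder is textually substituted per target before llc runs:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Under that scheme iXLen becomes i32 when run as riscv32 and i64 when run as
riscv64, while the shared CHECK lines verify the identical assembly both
targets produce, which is what allows the per-target rv32/rv64 copies to be
deleted elsewhere in this patch without losing coverage.)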
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
deleted file mode 100644
index dcc7b9071f27d4..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
-  <vscale x 4 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
-  <vscale x 4 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
-  <vscale x 4 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
-  <vscale x 4 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
-  <vscale x 4 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
-  <vscale x 4 x i16>,
-  <vscale x 64 x i8>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 64 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 64 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
-  <vscale x 2 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
-  <vscale x 2 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
-  <vscale x 2 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
-  <vscale x 2 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
-  <vscale x 2 x i32>,
-  <vscale x 32 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 32 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
-  <vscale x 1 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
-  <vscale x 1 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
-  <vscale x 1 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
-  <vscale x 1 x i64>,
-  <vscale x 16 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 16 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwredsum.ll
index a39a01b7a626d3..5f9d0fcceb168a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
   <vscale x 1 x i8>,
   <vscale x 4 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
   <vscale x 4 x i16>,
   <vscale x 2 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
   <vscale x 2 x i8>,
   <vscale x 4 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
   <vscale x 4 x i16>,
   <vscale x 8 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
   <vscale x 8 x i8>,
   <vscale x 4 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
   <vscale x 4 x i16>,
   <vscale x 16 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
   <vscale x 16 x i8>,
   <vscale x 4 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
   <vscale x 4 x i16>,
   <vscale x 32 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
   <vscale x 32 x i8>,
   <vscale x 4 x i16>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
   <vscale x 4 x i16>,
   <vscale x 64 x i8>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -294,7 +296,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 64 x i8> %1,
     <vscale x 4 x i16> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
   <vscale x 64 x i8>,
   <vscale x 4 x i16>,
   <vscale x 64 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -318,7 +320,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 64 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
   <vscale x 2 x i32>,
   <vscale x 1 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
   <vscale x 1 x i16>,
   <vscale x 2 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -364,7 +366,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -410,7 +412,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
   <vscale x 2 x i32>,
   <vscale x 4 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
   <vscale x 4 x i16>,
   <vscale x 2 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -456,7 +458,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
   <vscale x 2 x i32>,
   <vscale x 8 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
   <vscale x 8 x i16>,
   <vscale x 2 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -502,7 +504,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
   <vscale x 2 x i32>,
   <vscale x 16 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -524,7 +526,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
   <vscale x 16 x i16>,
   <vscale x 2 x i32>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
   <vscale x 2 x i32>,
   <vscale x 32 x i16>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -570,7 +572,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 32 x i16> %1,
     <vscale x 2 x i32> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
   <vscale x 32 x i16>,
   <vscale x 2 x i32>,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -594,7 +596,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i64> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -640,7 +642,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
   <vscale x 1 x i64>,
   <vscale x 2 x i32>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 1 x i64> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
   <vscale x 2 x i32>,
   <vscale x 1 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -686,7 +688,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
   <vscale x 1 x i64>,
   <vscale x 4 x i32>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -708,7 +710,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 1 x i64> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
   <vscale x 4 x i32>,
   <vscale x 1 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -732,7 +734,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
   <vscale x 1 x i64>,
   <vscale x 8 x i32>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -754,7 +756,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 1 x i64> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
   <vscale x 8 x i32>,
   <vscale x 1 x i64>,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -778,7 +780,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
   <vscale x 1 x i64>,
   <vscale x 16 x i32>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 16 x i32> %1,
     <vscale x 1 x i64> %2,
-    i64 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
   <vscale x 16 x i32>,
   <vscale x 1 x i64>,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -824,7 +826,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }

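The two RUN lines added above carry the whole merging idiom, so a minimal
sketch may help; the test below is hypothetical and not part of this commit.
`iXLen` is a plain-text placeholder rather than a real LLVM IR type: each RUN
line pipes the file through `sed` to rewrite `iXLen` into the target's native
XLEN integer type (i32 or i64) before the IR ever reaches `llc`.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

; @example is a made-up name for illustration; one CHECK line is shared by
; both RUN invocations because the emitted code is the same on both targets.
define <vscale x 4 x i16> @example(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: example:
; CHECK: vwredsum.vs v8, v9, v10
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)
  ret <vscale x 4 x i16> %a
}

Because llc emits identical vsetvli/vwredsum.vs sequences for riscv32 and
riscv64, a single set of CHECK lines covers both targets, which is what makes
the separate per-target copies (such as vwredsumu-rv64.ll, removed just
below) redundant.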
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
deleted file mode 100644
index 0440b0b27251a1..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ /dev/null
@@ -1,830 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
-  <vscale x 4 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
-  <vscale x 4 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
-  <vscale x 4 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
-  <vscale x 4 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
-  <vscale x 4 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
-  <vscale x 4 x i16>,
-  <vscale x 64 x i8>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 64 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
-  <vscale x 2 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
-  <vscale x 2 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
-  <vscale x 2 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
-  <vscale x 2 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
-  <vscale x 2 x i32>,
-  <vscale x 32 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 32 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
-  <vscale x 1 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
-  <vscale x 1 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
-  <vscale x 1 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
-  <vscale x 1 x i64>,
-  <vscale x 16 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 16 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll
index 972cf285f440ee..c7dffc15bb4848 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -28,9 +30,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
   <vscale x 1 x i8>,
   <vscale x 4 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -51,9 +53,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
   <vscale x 4 x i16>,
   <vscale x 2 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -64,7 +66,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -74,9 +76,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
   <vscale x 2 x i8>,
   <vscale x 4 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -97,9 +99,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -120,9 +122,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
   <vscale x 4 x i16>,
   <vscale x 8 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -156,7 +158,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,9 +168,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
   <vscale x 8 x i8>,
   <vscale x 4 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -189,9 +191,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
   <vscale x 4 x i16>,
   <vscale x 16 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -202,7 +204,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -212,9 +214,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
   <vscale x 16 x i8>,
   <vscale x 4 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -235,9 +237,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
   <vscale x 4 x i16>,
   <vscale x 32 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -248,7 +250,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -258,9 +260,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
   <vscale x 32 x i8>,
   <vscale x 4 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -272,7 +274,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -281,9 +283,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
   <vscale x 4 x i16>,
   <vscale x 64 x i8>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -294,7 +296,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 64 x i8> %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -304,9 +306,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
   <vscale x 64 x i8>,
   <vscale x 4 x i16>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -318,7 +320,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 4 x i16> %2,
     <vscale x 64 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 4 x i16> %a
 }
@@ -327,9 +329,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
   <vscale x 2 x i32>,
   <vscale x 1 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -340,7 +342,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -350,9 +352,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
   <vscale x 1 x i16>,
   <vscale x 2 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -364,7 +366,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -373,9 +375,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -386,7 +388,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,9 +398,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -410,7 +412,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -419,9 +421,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
   <vscale x 2 x i32>,
   <vscale x 4 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -432,7 +434,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -442,9 +444,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
   <vscale x 4 x i16>,
   <vscale x 2 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -456,7 +458,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -465,9 +467,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
   <vscale x 2 x i32>,
   <vscale x 8 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -478,7 +480,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -488,9 +490,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
   <vscale x 8 x i16>,
   <vscale x 2 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -502,7 +504,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
   <vscale x 2 x i32>,
   <vscale x 16 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -524,7 +526,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -534,9 +536,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
   <vscale x 16 x i16>,
   <vscale x 2 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -548,7 +550,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -557,9 +559,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
   <vscale x 2 x i32>,
   <vscale x 32 x i16>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -570,7 +572,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 32 x i16> %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
   <vscale x 32 x i16>,
   <vscale x 2 x i32>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -594,7 +596,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 2 x i32> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 2 x i32> %a
 }
@@ -603,9 +605,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -616,7 +618,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i64> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -626,9 +628,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -640,7 +642,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -649,9 +651,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
   <vscale x 1 x i64>,
   <vscale x 2 x i32>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -662,7 +664,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 1 x i64> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -672,9 +674,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
   <vscale x 2 x i32>,
   <vscale x 1 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -686,7 +688,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -695,9 +697,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
   <vscale x 1 x i64>,
   <vscale x 4 x i32>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -708,7 +710,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 1 x i64> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -718,9 +720,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
   <vscale x 4 x i32>,
   <vscale x 1 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -732,7 +734,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -741,9 +743,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
   <vscale x 1 x i64>,
   <vscale x 8 x i32>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -754,7 +756,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 1 x i64> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,9 +766,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
   <vscale x 8 x i32>,
   <vscale x 1 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -778,7 +780,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }
@@ -787,9 +789,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
   <vscale x 1 x i64>,
   <vscale x 16 x i32>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -800,7 +802,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 16 x i32> %1,
     <vscale x 1 x i64> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -810,9 +812,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
   <vscale x 16 x i32>,
   <vscale x 1 x i64>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -824,7 +826,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 1 x i64> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret <vscale x 1 x i64> %a
 }

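For reference, the net effect of the rename above, assembled in one
place: the scalar AVL operand is spelled with the placeholder type
iXLen, and each RUN line rewrites the placeholder with sed before
invoking llc, so a single body and a single set of FileCheck
assertions cover both riscv32 and riscv64. A minimal sketch of a test
in the merged style follows; the RUN lines and the declaration are
taken from the diff, while the function name and the hand-written
CHECK lines are illustrative only:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @sketch_vwredsumu(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, iXLen %vl) nounwind {
; CHECK-LABEL: sketch_vwredsumu:
; CHECK: vwredsumu.vs v8, v9, v10
entry:
  ; After sed, iXLen here is i32 on riscv32 and i64 on riscv64.
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 4 x i16> %2,
    iXLen %vl)
  ret <vscale x 4 x i16> %a
}

Substituting i64 reproduces the old -rv64 file and substituting i32
reproduces the old -rv32 file (modulo the RUN lines), which is why the
two could be merged without touching any CHECK line.
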
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
deleted file mode 100644
index 8c5275ce97a9fd..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
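A note on the operand lists above: the masked vwsub declaration
carries two trailing i64 operands (iXLen in the merged files), while
the unmasked one carries a single i64. The first is the AVL handed to
vsetvli in a0; the second is the policy immediate. As I read LLVM's
RISC-V policy encoding (bit 0 = tail agnostic, bit 1 = mask agnostic),
the possible values map as follows:

  policy 0 -> tu, mu    policy 1 -> ta, mu
  policy 2 -> tu, ma    policy 3 -> ta, ma

so the "i64 1" passed in the masked calls selects tail agnostic, mask
undisturbed, matching the "ta, mu" in the masked CHECK lines (the
unmasked tests above get "ta, ma").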
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsub.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsub.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsub.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwsub.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsub.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsub.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsub.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwsub.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwsub.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwsub.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwsub.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwsub.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwsub.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsub.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwsub.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwsub.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwsub.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

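The rename hunk below shows the mechanism this commit applies to every merged
file: the test is written once against the placeholder type iXLen, and each RUN
line textually instantiates it for one target before the IR reaches llc. A
minimal sketch of what llc actually sees for the file's first declaration (the
sed command is taken verbatim from the RUN lines below):

  ; after: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v ...
  declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16>,
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    i64)

This is exactly the declaration that the deleted vwsub-rv64.ll spelled out by
hand above, so a single iXLen file replaces both per-target copies. The two
instantiations can also share one set of CHECK lines, because the generated
vwsub code is identical on riscv32 and riscv64: the vector length and any
scalar operand are passed in registers (a0/a1) whose XLEN width never shows up
in the printed assembly.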
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwsub.ll
index 011b3dc19325a6..698a363cca266d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

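(The hunks above all rewrite the XLEN-sized VL/policy operands from a hard-coded i32 to the placeholder type iXLen, which is what lets one file serve both targets. As a sketch of the convention the merged files follow — the exact flag placement in each added file may differ — the RUN-line header of such a test substitutes the placeholder per target before llc ever parses it:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Because sed rewrites iXLen to i32 or i64 up front, a single set of autogenerated CHECK lines covers both triples, and the per-target copies such as the file deleted below become redundant.)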
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
deleted file mode 100644
index d6d1b4dcc74133..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ /dev/null
@@ -1,2163 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsub.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsub.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsub.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsub.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsub.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsub.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

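The rename below shows the pattern this commit applies to every file pair listed above: the rv32 copy becomes the merged test, the XLen-dependent scalar type is rewritten from a hard-coded i32 to the placeholder iXLen, and the RUN lines pipe the file through sed to instantiate that placeholder as i32 or i64 before llc parses it, so a single file now exercises both riscv32 and riscv64 against one set of CHECK lines. A minimal sketch of a merged test in this style (the function name here is illustrative only; the RUN lines and intrinsic follow the diff below):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; iXLen is not a real LLVM type; sed rewrites it to the target's XLEN
; integer type before llc ever sees the file.
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @sketch_vwsub_wv(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
entry:
  ; Only the VL (and policy) operands carry the placeholder type, so the
  ; same body is valid IR for both targets after substitution and the
  ; generated vwsub.wv assembly is identical, which is why the rv32 and
  ; rv64 copies could be merged without changing any CHECK lines.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i16> %a
}
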
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll
index ced61224dcf580..1d237384bf0a23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4r.v v24, (a0)
@@ -279,7 +281,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -288,9 +290,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,10 +313,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -326,7 +328,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -515,7 +517,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -524,9 +526,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -537,7 +539,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -547,10 +549,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -562,7 +564,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -571,9 +573,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -584,7 +586,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -594,10 +596,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -609,7 +611,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -704,7 +706,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -713,9 +715,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -726,7 +728,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -736,10 +738,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -751,7 +753,7 @@ entry:
     <vscale x 1 x i16> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -760,9 +762,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -773,7 +775,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -783,10 +785,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -798,7 +800,7 @@ entry:
     <vscale x 2 x i16> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -807,9 +809,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -820,7 +822,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -830,10 +832,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -845,7 +847,7 @@ entry:
     <vscale x 4 x i16> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 8 x i16> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 16 x i16> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 32 x i16> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1033,7 +1035,7 @@ entry:
     <vscale x 1 x i32> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1042,9 +1044,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1055,7 +1057,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1065,10 +1067,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1080,7 +1082,7 @@ entry:
     <vscale x 2 x i32> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1089,9 +1091,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1102,7 +1104,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1112,10 +1114,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1127,7 +1129,7 @@ entry:
     <vscale x 4 x i32> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1136,9 +1138,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1149,7 +1151,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1159,10 +1161,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1174,7 +1176,7 @@ entry:
     <vscale x 8 x i32> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1183,9 +1185,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1206,10 +1208,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1221,7 +1223,7 @@ entry:
     <vscale x 16 x i32> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1230,9 +1232,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1243,7 +1245,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1253,10 +1255,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1268,7 +1270,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,9 +1279,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1290,7 +1292,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1300,10 +1302,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1324,9 +1326,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1337,7 +1339,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1347,10 +1349,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1362,7 +1364,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1371,9 +1373,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1384,7 +1386,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1394,10 +1396,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1409,12 +1411,12 @@ entry:
     <vscale x 8 x i64> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1426,12 +1428,12 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1443,12 +1445,12 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1460,12 +1462,12 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1477,12 +1479,12 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1494,12 +1496,12 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1511,12 +1513,12 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1528,12 +1530,12 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1545,12 +1547,12 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1562,12 +1564,12 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1579,12 +1581,12 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1596,12 +1598,12 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1613,12 +1615,12 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1630,12 +1632,12 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1647,12 +1649,12 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1664,12 +1666,12 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1681,12 +1683,12 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1698,12 +1700,12 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,12 +1717,12 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1732,12 +1734,12 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1749,12 +1751,12 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,12 +1768,12 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1783,12 +1785,12 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1800,12 +1802,12 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1817,12 +1819,12 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1834,12 +1836,12 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1851,12 +1853,12 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1868,12 +1870,12 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1885,12 +1887,12 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1902,12 +1904,12 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1919,12 +1921,12 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1936,12 +1938,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1953,12 +1955,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1970,12 +1972,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1987,12 +1989,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2004,12 +2006,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2021,12 +2023,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2038,12 +2040,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2055,12 +2057,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2072,12 +2074,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2089,12 +2091,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2106,12 +2108,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2123,12 +2125,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2140,12 +2142,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2157,7 +2159,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
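
The merge relies on a single substitution trick: the rv32 and rv64 copies of these tests differed only in the XLEN-sized scalar type of the VL and policy operands (i32 on riscv32, i64 on riscv64). The merged files spell that type as the placeholder token iXLen, which the RUN lines expand with sed before llc ever parses the file. Below is a minimal sketch of the pattern, assuming nothing beyond what this diff shows: the vwsubu intrinsic and its signature are copied from the hunks in this commit, while the function name @sketch_vwsubu and the value names are illustrative only.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i16> @sketch_vwsubu(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, iXLen %vl) nounwind {
  ; After sed runs, iXLen reads i32 under -mtriple=riscv32 and i64 under
  ; -mtriple=riscv64, so one file exercises the same codegen on both targets.
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %x,
    <vscale x 1 x i8> %y,
    iXLen %vl)
  ret <vscale x 1 x i16> %a
}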

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
deleted file mode 100644
index 9198c7b80ccaf4..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ /dev/null
@@ -1,1442 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vv v10, v8, v9
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vv v12, v8, v10
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vv v16, v8, v12
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v9, v8, a0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwsubu.vx v10, v8, a0
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwsubu.vx v12, v8, a0
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwsubu.vx v16, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

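The rv64 file deleted above is line-for-line the rv32 content with the trailing scalar operands widened from i32 to i64; the rename that follows keeps a single copy. For readers of the tests, a short annotated sketch of how those trailing operands map to the checked assembly, reusing the masked intrinsic declared above — the operand names are illustrative stand-ins, and the policy reading is inferred from the "ta, mu" (masked) versus "ta, ma" (unmasked, undef passthru) CHECK lines in the diff:

; Masked vwsubu.vx: the two trailing scalars are the vector length and the
; tail/mask policy. Policy 1 selects tail-agnostic, mask-undisturbed, which
; is why the masked tests check "ta, mu" while the unmasked calls with an
; undef passthru check "ta, ma".
  %r = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
    <vscale x 1 x i64> %passthru,   ; destination merge value
    <vscale x 1 x i32> %vs2,        ; narrow vector operand
    i32 %rs1,                       ; scalar operand (.vx form)
    <vscale x 1 x i1> %mask,        ; v0 mask
    i64 %vl, i64 1)                 ; vl, policy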
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwsubu.ll
index 047032f997bd84..59fa782e241616 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -29,10 +31,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -53,9 +55,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -67,7 +69,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -77,10 +79,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -92,7 +94,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -101,9 +103,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -115,7 +117,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -125,10 +127,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -140,7 +142,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -149,9 +151,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -163,7 +165,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -173,10 +175,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -188,7 +190,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -197,9 +199,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -211,7 +213,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -221,10 +223,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -236,7 +238,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -245,9 +247,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -259,7 +261,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -269,10 +271,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -284,7 +286,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -293,9 +295,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -307,7 +309,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -317,10 +319,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -332,7 +334,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -341,9 +343,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -355,7 +357,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -365,10 +367,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -380,7 +382,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -389,9 +391,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -403,7 +405,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -413,10 +415,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -428,7 +430,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -437,9 +439,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -451,7 +453,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -461,10 +463,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -476,7 +478,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -485,9 +487,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -499,7 +501,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -547,7 +549,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -557,10 +559,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -572,7 +574,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -581,9 +583,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -595,7 +597,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -605,10 +607,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -620,7 +622,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -629,9 +631,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -643,7 +645,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -653,10 +655,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -668,7 +670,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -677,9 +679,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -691,7 +693,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -701,10 +703,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -716,7 +718,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -725,9 +727,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -739,7 +741,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -749,10 +751,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -764,7 +766,7 @@ entry:
     <vscale x 1 x i8> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -773,9 +775,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -787,7 +789,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -797,10 +799,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -812,7 +814,7 @@ entry:
     <vscale x 2 x i8> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -821,9 +823,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -835,7 +837,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -845,10 +847,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -860,7 +862,7 @@ entry:
     <vscale x 4 x i8> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -869,9 +871,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -883,7 +885,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -893,10 +895,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -908,7 +910,7 @@ entry:
     <vscale x 8 x i8> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -917,9 +919,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -931,7 +933,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -941,10 +943,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -956,7 +958,7 @@ entry:
     <vscale x 16 x i8> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -965,9 +967,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -979,7 +981,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -989,10 +991,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1004,7 +1006,7 @@ entry:
     <vscale x 32 x i8> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1013,9 +1015,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1027,7 +1029,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1037,10 +1039,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 1 x i16> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1061,9 +1063,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1075,7 +1077,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1085,10 +1087,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 2 x i16> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1109,9 +1111,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1123,7 +1125,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1133,10 +1135,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1148,7 +1150,7 @@ entry:
     <vscale x 4 x i16> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1157,9 +1159,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1171,7 +1173,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,10 +1183,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 8 x i16> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1205,9 +1207,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1219,7 +1221,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1229,10 +1231,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1246,7 @@ entry:
     <vscale x 16 x i16> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1253,9 +1255,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1267,7 +1269,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,10 +1279,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1292,7 +1294,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1301,9 +1303,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1325,10 +1327,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1340,7 +1342,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1349,9 +1351,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1363,7 +1365,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1373,10 +1375,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1388,7 +1390,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1397,9 +1399,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1411,7 +1413,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1421,10 +1423,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1436,7 +1438,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

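The rename above carries the substance of the merge: the XLEN-dependent scalar type is spelled iXLen in the test source, and each RUN line instantiates it with sed before the IR reaches llc, so a single file now drives both riscv32 and riscv64. A minimal self-contained sketch of the pattern, assembled from lines of the merged file above (only the function name @sketch is an invented placeholder):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,   ; passthru
  <vscale x 1 x i8>,    ; vs2
  <vscale x 1 x i8>,    ; vs1
  iXLen)                ; vl, rewritten to i32 or i64 by sed

define <vscale x 1 x i16> @sketch(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: sketch:
; CHECK: vwsubu.vv
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i16> %a
}

Because the codegen is identical for both targets, the autogenerated assertions can be shared under the single CHECK prefix used above, instead of maintaining duplicate -rv32/-rv64 copies.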
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
deleted file mode 100644
index 85eec98222adb4..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ /dev/null
@@ -1,2163 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i8,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i8 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i8,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i8 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i8,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i8 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i8,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i8 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i8,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i8 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i8,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i8 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i16,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i16 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i16,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i16 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i16,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i16 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i16,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i16 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i16,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i16 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %0,
-    i8 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %0,
-    i8 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %0,
-    i8 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %0,
-    i8 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %0,
-    i8 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %0,
-    i8 %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %0,
-    i16 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %0,
-    i16 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %0,
-    i16 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %0,
-    i16 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %0,
-    i16 %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %0,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v10, v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vwsubu.wv v12, v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vwsubu.wv v16, v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vwsubu.wv v24, v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %0,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

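The rename below shows the merging idiom this commit applies to every pair of files: the rv32 test body is kept, XLEN-sized i32 operands become the iXLen placeholder, and a pair of sed RUN lines instantiates the same file for riscv32 and riscv64. As a minimal standalone sketch of that idiom — the vadd intrinsic and its CHECK lines here are illustrative only and are not part of this diff:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; iXLen is not a real LLVM type; sed rewrites it to i32 or i64 before llc
; ever parses the file, so one copy of the test serves both targets.
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

A single set of CHECK lines can be shared only because the generated assembly for these widening intrinsics does not depend on XLEN; per the commit title, tests whose rv32/rv64 content differs are not merged this way.
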
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll
index ce1e1bdbeaef7f..b8b124de0d8e22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4r.v v24, (a0)
@@ -279,7 +281,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -288,9 +290,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -301,7 +303,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -311,10 +313,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -326,7 +328,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re16.v v24, (a0)
@@ -515,7 +517,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -524,9 +526,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -537,7 +539,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -547,10 +549,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -562,7 +564,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -571,9 +573,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -584,7 +586,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -594,10 +596,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -609,7 +611,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl4re32.v v24, (a0)
@@ -704,7 +706,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -713,9 +715,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -726,7 +728,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -736,10 +738,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -751,7 +753,7 @@ entry:
     <vscale x 1 x i16> %1,
     i8 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -760,9 +762,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -773,7 +775,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -783,10 +785,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
   <vscale x 2 x i16>,
   i8,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -798,7 +800,7 @@ entry:
     <vscale x 2 x i16> %1,
     i8 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -807,9 +809,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -820,7 +822,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -830,10 +832,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
   <vscale x 4 x i16>,
   i8,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -845,7 +847,7 @@ entry:
     <vscale x 4 x i16> %1,
     i8 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
   <vscale x 8 x i16>,
   i8,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 8 x i16> %1,
     i8 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
   <vscale x 16 x i16>,
   i8,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 16 x i16> %1,
     i8 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i8,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     i8 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
   <vscale x 32 x i16>,
   i8,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 32 x i16> %1,
     i8 %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
   <vscale x 1 x i32>,
   i16,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1033,7 +1035,7 @@ entry:
     <vscale x 1 x i32> %1,
     i16 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1042,9 +1044,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1055,7 +1057,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1065,10 +1067,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
   <vscale x 2 x i32>,
   i16,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1080,7 +1082,7 @@ entry:
     <vscale x 2 x i32> %1,
     i16 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1089,9 +1091,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1102,7 +1104,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1112,10 +1114,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
   <vscale x 4 x i32>,
   i16,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1127,7 +1129,7 @@ entry:
     <vscale x 4 x i32> %1,
     i16 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1136,9 +1138,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1149,7 +1151,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1159,10 +1161,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
   <vscale x 8 x i32>,
   i16,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1174,7 +1176,7 @@ entry:
     <vscale x 8 x i32> %1,
     i16 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1183,9 +1185,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i16,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1196,7 +1198,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     i16 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1206,10 +1208,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
   <vscale x 16 x i32>,
   i16,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1221,7 +1223,7 @@ entry:
     <vscale x 16 x i32> %1,
     i16 %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1230,9 +1232,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1243,7 +1245,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1253,10 +1255,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1268,7 +1270,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1277,9 +1279,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1290,7 +1292,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1300,10 +1302,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1315,7 +1317,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1324,9 +1326,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1337,7 +1339,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1347,10 +1349,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1362,7 +1364,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1371,9 +1373,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i32,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1384,7 +1386,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     i32 %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1394,10 +1396,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
   <vscale x 8 x i64>,
   i32,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1409,12 +1411,12 @@ entry:
     <vscale x 8 x i64> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1426,12 +1428,12 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1443,12 +1445,12 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1460,12 +1462,12 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1477,12 +1479,12 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1494,12 +1496,12 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1511,12 +1513,12 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1528,12 +1530,12 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1545,12 +1547,12 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1562,12 +1564,12 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1579,12 +1581,12 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1596,12 +1598,12 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1613,12 +1615,12 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1630,12 +1632,12 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1647,12 +1649,12 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1664,12 +1666,12 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1681,12 +1683,12 @@ entry:
     <vscale x 1 x i16> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1698,12 +1700,12 @@ entry:
     <vscale x 2 x i16> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,12 +1717,12 @@ entry:
     <vscale x 4 x i16> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1732,12 +1734,12 @@ entry:
     <vscale x 8 x i16> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1749,12 +1751,12 @@ entry:
     <vscale x 16 x i16> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,12 +1768,12 @@ entry:
     <vscale x 32 x i16> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1783,12 +1785,12 @@ entry:
     <vscale x 1 x i32> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1800,12 +1802,12 @@ entry:
     <vscale x 2 x i32> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1817,12 +1819,12 @@ entry:
     <vscale x 4 x i32> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1834,12 +1836,12 @@ entry:
     <vscale x 8 x i32> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1851,12 +1853,12 @@ entry:
     <vscale x 16 x i32> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1868,12 +1870,12 @@ entry:
     <vscale x 1 x i64> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1885,12 +1887,12 @@ entry:
     <vscale x 2 x i64> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1902,12 +1904,12 @@ entry:
     <vscale x 4 x i64> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1919,12 +1921,12 @@ entry:
     <vscale x 8 x i64> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -1936,12 +1938,12 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -1953,12 +1955,12 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -1970,12 +1972,12 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -1987,12 +1989,12 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2004,12 +2006,12 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2021,12 +2023,12 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2038,12 +2040,12 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2055,12 +2057,12 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2072,12 +2074,12 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2089,12 +2091,12 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2106,12 +2108,12 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2123,12 +2125,12 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2140,12 +2142,12 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2157,7 +2159,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %0,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
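A note on the iXLen substitutions that run through this diff: iXLen is not an LLVM IR type. It is a textual placeholder for the target's XLEN that gets rewritten to i32 or i64 before llc ever sees the file, which is what lets one test body serve both rv32 and rv64. A minimal sketch of the RUN lines such a merged test would carry (assuming the usual sed-based substitution these merged rvv tests use, mirroring the -mattr=+v configuration of the deleted per-target files):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s

Because the vsetvli/vwsubu.w sequences are identical on both targets, a single set of CHECK lines suffices; the trailing "iXLen %4, iXLen 1" operands on the masked intrinsics are the vector length and the tail/mask policy immediate, and both scrub the same way under either substitution.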

More information about the llvm-commits mailing list