[llvm] 85e42db - [RISCV] Merge some rvv intrinsic test cases that only differ by XLen type.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 22 22:17:10 PST 2022


Author: Craig Topper
Date: 2022-01-22T21:55:29-08:00
New Revision: 85e42db1b6db16a2dab4405604971d84899612bf

URL: https://github.com/llvm/llvm-project/commit/85e42db1b6db16a2dab4405604971d84899612bf
DIFF: https://github.com/llvm/llvm-project/commit/85e42db1b6db16a2dab4405604971d84899612bf.diff

LOG: [RISCV] Merge some rvv intrinsic test cases that only differ by XLen type.

Instead of keeping separate tests for i32 XLen and i64 XLen, use sed to
replace iXLen with i32/i64 before running llc.

This change covers the intrinsics that operate exclusively on mask
values and removes over 4000 lines of test content. More merging will
come in future changes.

Differential Revision: https://reviews.llvm.org/D117968
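
For context, the merged tests keep a single iXLen placeholder in the IR and
let the RUN lines pipe the file through sed before llc. The pattern below is
taken from the new vcpop.ll added by this change (the one-line declare is a
condensed illustration of the placeholder, not the exact formatting used in
the file):

    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s
    declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(<vscale x 1 x i1>, iXLen)

Because the generated assembly for these mask intrinsics is identical on RV32
and RV64, a single set of autogenerated CHECK lines serves both run lines.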

Added: 
    llvm/test/CodeGen/RISCV/rvv/vcpop.ll
    llvm/test/CodeGen/RISCV/rvv/vfirst.ll
    llvm/test/CodeGen/RISCV/rvv/vid.ll
    llvm/test/CodeGen/RISCV/rvv/viota.ll
    llvm/test/CodeGen/RISCV/rvv/vlm.ll
    llvm/test/CodeGen/RISCV/rvv/vmand.ll
    llvm/test/CodeGen/RISCV/rvv/vmandn.ll
    llvm/test/CodeGen/RISCV/rvv/vmclr.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor.ll
    llvm/test/CodeGen/RISCV/rvv/vmor.ll
    llvm/test/CodeGen/RISCV/rvv/vmorn.ll
    llvm/test/CodeGen/RISCV/rvv/vmset.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor.ll
    llvm/test/CodeGen/RISCV/rvv/vsm.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
deleted file mode 100644
index fe5bba14eb6d2..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
+++ /dev/null
@@ -1,282 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare i32 @llvm.riscv.vcpop.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv32i1(
-  <vscale x 32 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.i32.nxv64i1(
-  <vscale x 64 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.i32.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i32);
-
-define i32 @intrinsic_vcpop_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll
deleted file mode 100644
index 8d583b8df634a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll
+++ /dev/null
@@ -1,282 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare i64 @llvm.riscv.vcpop.i64.nxv1i1(
-  <vscale x 1 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv2i1(
-  <vscale x 2 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv4i1(
-  <vscale x 4 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv8i1(
-  <vscale x 8 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv16i1(
-  <vscale x 16 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vcpop.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vcpop.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
new file mode 100644
index 0000000000000..1b77ec8dd82e0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
deleted file mode 100644
index 6d6d3a8c74616..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ /dev/null
@@ -1,282 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv32i1(
-  <vscale x 32 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.i32.nxv64i1(
-  <vscale x 64 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i32 %1)
-
-  ret i32 %a
-}
-
-declare i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i32);
-
-define i32 @intrinsic_vfirst_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i32 %2)
-
-  ret i32 %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
deleted file mode 100644
index 0e20516a69c79..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
+++ /dev/null
@@ -1,282 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare i64 @llvm.riscv.vfirst.i64.nxv1i1(
-  <vscale x 1 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv2i1(
-  <vscale x 2 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv4i1(
-  <vscale x 4 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv8i1(
-  <vscale x 8 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv16i1(
-  <vscale x 16 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-
-  ret i64 %a
-}
-
-declare i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define i64 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vfirst.m a0, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret i64 %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
new file mode 100644
index 0000000000000..5d71c17db6c29
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_m_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vfirst.m a0, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    iXLen %1)
+
+  ret iXLen %a
+}
+
+declare iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfirst.m a0, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call iXLen @llvm.riscv.vfirst.mask.iXLen.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    iXLen %2)
+
+  ret iXLen %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
deleted file mode 100644
index 1413c0ec4b443..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
+++ /dev/null
@@ -1,758 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-    i64 %0)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-    i64 %0)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-    i64 %0)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-    i64 %0)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-    i64 %0)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-    i64 %0)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-    i64 %0)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-    i64 %0)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-    i64 %0)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
-    i64 %0)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
-    i64 %0)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
-    i64 %0)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
-    i64 %0)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
-    i64 %0)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
-    i64 %0)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
-    i64 %0)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
-    i64 %0)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
-    i64 %0)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
-    i64 %0)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
-    i64 %0)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vid_v_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
-    i64 %0)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll
similarity index 81%
rename from llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vid.ll
index 58ed5cfb7620d..1888074767cf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i32 %0) nounwind {
+define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -12,7 +14,7 @@ define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -20,9 +22,9 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -32,15 +34,15 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i32 %0) nounwind {
+define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -48,7 +50,7 @@ define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -56,9 +58,9 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -68,15 +70,15 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i32 %0) nounwind {
+define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -84,7 +86,7 @@ define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -92,9 +94,9 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -104,15 +106,15 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i32 %0) nounwind {
+define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -120,7 +122,7 @@ define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -128,9 +130,9 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -140,15 +142,15 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i32 %0) nounwind {
+define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -156,7 +158,7 @@ define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -164,9 +166,9 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -176,15 +178,15 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i32 %0) nounwind {
+define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -192,7 +194,7 @@ define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -200,9 +202,9 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -212,15 +214,15 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i32 %0) nounwind {
+define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -228,7 +230,7 @@ define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -236,9 +238,9 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -248,15 +250,15 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i32 %0) nounwind {
+define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -264,7 +266,7 @@ define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -272,9 +274,9 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -284,15 +286,15 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i32 %0) nounwind {
+define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -300,7 +302,7 @@ define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -308,9 +310,9 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -320,15 +322,15 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i32 %0) nounwind {
+define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -336,7 +338,7 @@ define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -344,9 +346,9 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -356,15 +358,15 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i32 %0) nounwind {
+define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -372,7 +374,7 @@ define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -380,9 +382,9 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -392,15 +394,15 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i32 %0) nounwind {
+define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -408,7 +410,7 @@ define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -416,9 +418,9 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -428,15 +430,15 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i32 %0) nounwind {
+define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -444,7 +446,7 @@ define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -452,9 +454,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -464,15 +466,15 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i32 %0) nounwind {
+define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -480,7 +482,7 @@ define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -488,9 +490,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -500,15 +502,15 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i32 %0) nounwind {
+define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -516,7 +518,7 @@ define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -524,9 +526,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -536,15 +538,15 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i32 %0) nounwind {
+define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -552,7 +554,7 @@ define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -560,9 +562,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -572,15 +574,15 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i32 %0) nounwind {
+define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -588,7 +590,7 @@ define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -596,9 +598,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -608,15 +610,15 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i32 %0) nounwind {
+define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -624,7 +626,7 @@ define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -632,9 +634,9 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -644,15 +646,15 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i32 %0) nounwind {
+define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -660,7 +662,7 @@ define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -668,9 +670,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -680,15 +682,15 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i32 %0) nounwind {
+define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -696,7 +698,7 @@ define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -704,9 +706,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -716,15 +718,15 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i32 %0) nounwind {
+define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vid_v_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -732,7 +734,7 @@ define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -740,9 +742,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -752,7 +754,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }

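For reference, the new RUN lines in the merged vid.ll above substitute the iXLen placeholder textually before llc ever sees the IR, so a single file with one set of CHECK lines covers both XLen widths. A rough manual equivalent of what lit runs for this test is sketched below; the explicit file path merely stands in for lit's %s substitution and is only illustrative:

    # RV32: rewrite iXLen to i32, compile, and check the shared CHECK lines.
    sed 's/iXLen/i32/g' llvm/test/CodeGen/RISCV/rvv/vid.ll \
      | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
      | FileCheck llvm/test/CodeGen/RISCV/rvv/vid.ll

    # RV64: same test body, with iXLen rewritten to i64 instead.
    sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/rvv/vid.ll \
      | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
      | FileCheck llvm/test/CodeGen/RISCV/rvv/vid.ll

Both invocations can share one FileCheck prefix because these mask-only intrinsics generate identical assembly regardless of XLen; only the scalar VL argument's IR type differs.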
diff  --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
deleted file mode 100644
index 0e37ff483fb4f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
+++ /dev/null
@@ -1,882 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i1> %1,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i1> %1,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i1> %1,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i1> %1,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i1> %1,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i1> %1,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll
similarity index 86%
rename from llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/viota.ll
index 6b3838cd5898c..acec6f7c8a08f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -14,7 +16,7 @@ define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0,
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -23,9 +25,9 @@ declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -36,16 +38,16 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
 
 declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -54,7 +56,7 @@ define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0,
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -63,9 +65,9 @@ declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -76,16 +78,16 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
 
 declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -94,7 +96,7 @@ define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0,
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -103,9 +105,9 @@ declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -116,16 +118,16 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
 
 declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -134,7 +136,7 @@ define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0,
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -143,9 +145,9 @@ declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -156,16 +158,16 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
 
 declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -174,7 +176,7 @@ define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1>
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
     <vscale x 16 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -183,9 +185,9 @@ declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -196,16 +198,16 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
 
 declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -214,7 +216,7 @@ define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1>
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
     <vscale x 32 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -223,9 +225,9 @@ declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -236,16 +238,16 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
 
 declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -254,7 +256,7 @@ define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1>
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
     <vscale x 64 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -263,9 +265,9 @@ declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
@@ -276,16 +278,16 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i1> %1,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
 
 declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -294,7 +296,7 @@ define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -303,9 +305,9 @@ declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -316,16 +318,16 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
 
 declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -334,7 +336,7 @@ define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -343,9 +345,9 @@ declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -356,16 +358,16 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
 
 declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -374,7 +376,7 @@ define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -383,9 +385,9 @@ declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -396,16 +398,16 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
 
 declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -414,7 +416,7 @@ define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -423,9 +425,9 @@ declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -436,16 +438,16 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
 
 declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -454,7 +456,7 @@ define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
     <vscale x 16 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -463,9 +465,9 @@ declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -476,16 +478,16 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
 
 declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -494,7 +496,7 @@ define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
     <vscale x 32 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -503,9 +505,9 @@ declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -516,16 +518,16 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
 
 declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -534,7 +536,7 @@ define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -543,9 +545,9 @@ declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -556,16 +558,16 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
 
 declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -574,7 +576,7 @@ define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -583,9 +585,9 @@ declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -596,16 +598,16 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
 
 declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -614,7 +616,7 @@ define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -623,9 +625,9 @@ declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -636,16 +638,16 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
 
 declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -654,7 +656,7 @@ define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -663,9 +665,9 @@ declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -676,16 +678,16 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
 
 declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -694,7 +696,7 @@ define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
     <vscale x 16 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -703,9 +705,9 @@ declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -716,16 +718,16 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
 
 declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -734,7 +736,7 @@ define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -743,9 +745,9 @@ declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -756,16 +758,16 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
 
 declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -774,7 +776,7 @@ define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -783,9 +785,9 @@ declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -796,16 +798,16 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
 
 declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -814,7 +816,7 @@ define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -823,9 +825,9 @@ declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -836,16 +838,16 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
 
 declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -854,7 +856,7 @@ define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -863,9 +865,9 @@ declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -876,7 +878,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll
deleted file mode 100644
index 21b3f7b0b0ad2..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-
-declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, i64);
-
-define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, i64 %1)
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, i64);
-
-define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, i64 %1)
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, i64);
-
-define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, i64);
-
-define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, i64);
-
-define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, i64);
-
-define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, i64);
-
-define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vlm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
-  ret <vscale x 64 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vlm.ll
index 72cac45129035..765db6d47d7a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlm.ll
@@ -1,94 +1,96 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 
-declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, i32);
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, iXLen %1)
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, i32);
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, iXLen %1)
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, i32);
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, i32 %1)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, iXLen %1)
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, i32);
+declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, i32 %1)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, iXLen %1)
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, i32);
+declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, i32 %1)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, iXLen %1)
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, i32);
+declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, i32 %1)
+  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, iXLen %1)
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, i32);
+declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, i32 %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, i32 %1)
+  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, iXLen %1)
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
deleted file mode 100644
index 12107a960f87d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmand.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmand.ll
index 4d0a65d0f892b..2743f9deab803 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll
deleted file mode 100644
index 5ad6aa2ee4d8f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmandn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmandn.ll
index 90d6cba3592d0..c7977e9855ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
deleted file mode 100644
index 50516db35a4fc..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
+++ /dev/null
@@ -1,114 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
-    i64 %0)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
-    i64 %0)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
-    i64 %0)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
-    i64 %0)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
-    i64 %0)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
-    i64 %0)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
-    i64 %0)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll
similarity index 72%
rename from llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmclr.ll
index 49fb55c901b2b..f786f033db2ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -12,15 +14,15 @@ define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i1> %a
 }
 
 declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -28,15 +30,15 @@ define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i1> %a
 }
 
 declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -44,15 +46,15 @@ define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i1> %a
 }
 
 declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -60,15 +62,15 @@ define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i1> %a
 }
 
 declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -76,15 +78,15 @@ define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x i1> %a
 }
 
 declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -92,15 +94,15 @@ define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 32 x i1> %a
 }
 
 declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -108,7 +110,7 @@ define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
deleted file mode 100644
index b4397d512e34b..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmnand.ll
index 958d38f0023f6..ee128c178c552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
deleted file mode 100644
index 5beaeadcdcab2..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmnor.ll
index ca5bcf5463fa5..9eb829964b0f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
deleted file mode 100644
index 81fa40663fc1a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmor.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmor.ll
index 117d152f7248c..a5bcc135150dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll
deleted file mode 100644
index 89de2b78e94f5..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmorn.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmorn.ll
index 8509603acd2f5..eddfe24493f70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
deleted file mode 100644
index f34157dfd3797..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
+++ /dev/null
@@ -1,114 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
-    i64 %0)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
-    i64 %0)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
-    i64 %0)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
-    i64 %0)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
-    i64 %0)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
-    i64 %0)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind {
-; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
-    i64 %0)
-
-  ret <vscale x 64 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmset.ll
similarity index 72%
rename from llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmset.ll
index 68a6719c76580..19b05954e243f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmset.ll
@@ -1,10 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -12,15 +14,15 @@ define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 1 x i1> %a
 }
 
 declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -28,15 +30,15 @@ define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 2 x i1> %a
 }
 
 declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -44,15 +46,15 @@ define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 4 x i1> %a
 }
 
 declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -60,15 +62,15 @@ define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 8 x i1> %a
 }
 
 declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -76,15 +78,15 @@ define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 16 x i1> %a
 }
 
 declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -92,15 +94,15 @@ define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 32 x i1> %a
 }
 
 declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -108,7 +110,7 @@ define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
-    i32 %0)
+    iXLen %0)
 
   ret <vscale x 64 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
deleted file mode 100644
index 0f776c83e0129..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
+++ /dev/null
@@ -1,296 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmsif.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsif.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-  ret <vscale x 64 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index 888b6ebbbc3f8..5b76892e5a3fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 1 x i1> %a
 }
 
@@ -23,9 +25,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -39,15 +41,15 @@ entry:
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 1 x i1> %a
 }
 
 declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 2 x i1> %a
 }
 
@@ -65,9 +67,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -81,15 +83,15 @@ entry:
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 2 x i1> %a
 }
 
 declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 4 x i1> %a
 }
 
@@ -107,9 +109,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -123,15 +125,15 @@ entry:
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 4 x i1> %a
 }
 
 declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 8 x i1> %a
 }
 
@@ -149,9 +151,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -165,15 +167,15 @@ entry:
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 8 x i1> %a
 }
 
 declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, i32
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
     <vscale x 16 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 16 x i1> %a
 }
 
@@ -191,9 +193,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -207,15 +209,15 @@ entry:
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 16 x i1> %a
 }
 
 declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, i32
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
     <vscale x 32 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 32 x i1> %a
 }
 
@@ -233,9 +235,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -249,15 +251,15 @@ entry:
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 32 x i1> %a
 }
 
 declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, i32
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
     <vscale x 64 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 64 x i1> %a
 }
 
@@ -275,9 +277,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -291,6 +293,6 @@ entry:
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 64 x i1> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
deleted file mode 100644
index 8fba91102f814..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
+++ /dev/null
@@ -1,296 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
-    <vscale x 1 x i1> %0,
-    i64 %1)
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
-    <vscale x 2 x i1> %0,
-    i64 %1)
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
-    <vscale x 4 x i1> %0,
-    i64 %1)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
-    <vscale x 8 x i1> %0,
-    i64 %1)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
-    <vscale x 16 x i1> %0,
-    i64 %1)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
-    <vscale x 32 x i1> %0,
-    i64 %1)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmsof.m v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
-    <vscale x 64 x i1> %0,
-    i64 %1)
-  ret <vscale x 64 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsof.m v10, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-  ret <vscale x 64 x i1> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index b5db2a4b284ad..290206244f3e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -1,11 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -15,7 +17,7 @@ define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
     <vscale x 1 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 1 x i1> %a
 }
 
@@ -23,9 +25,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -39,15 +41,15 @@ entry:
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 1 x i1> %a
 }
 
 declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -57,7 +59,7 @@ define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
     <vscale x 2 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 2 x i1> %a
 }
 
@@ -65,9 +67,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -81,15 +83,15 @@ entry:
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 2 x i1> %a
 }
 
 declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -99,7 +101,7 @@ define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
     <vscale x 4 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 4 x i1> %a
 }
 
@@ -107,9 +109,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -123,15 +125,15 @@ entry:
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 4 x i1> %a
 }
 
 declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -141,7 +143,7 @@ define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, i32 %1)
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
     <vscale x 8 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 8 x i1> %a
 }
 
@@ -149,9 +151,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -165,15 +167,15 @@ entry:
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 8 x i1> %a
 }
 
 declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -183,7 +185,7 @@ define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, i32
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
     <vscale x 16 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 16 x i1> %a
 }
 
@@ -191,9 +193,9 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -207,15 +209,15 @@ entry:
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 16 x i1> %a
 }
 
 declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -225,7 +227,7 @@ define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, i32
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
     <vscale x 32 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 32 x i1> %a
 }
 
@@ -233,9 +235,9 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -249,15 +251,15 @@ entry:
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 32 x i1> %a
 }
 
 declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -267,7 +269,7 @@ define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, i32
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
     <vscale x 64 x i1> %0,
-    i32 %1)
+    iXLen %1)
   ret <vscale x 64 x i1> %a
 }
 
@@ -275,9 +277,9 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v0
@@ -291,6 +293,6 @@ entry:
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
deleted file mode 100644
index eb31e34b9c4be..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmxnor.ll
index 82759bc9ea0b5..fbadb42494837 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
deleted file mode 100644
index 0e8d66b558315..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
-  <vscale x 1 x i1>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
-    <vscale x 1 x i1> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
-  <vscale x 2 x i1>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
-    <vscale x 2 x i1> %0,
-    <vscale x 2 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
-  <vscale x 4 x i1>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
-    <vscale x 4 x i1> %0,
-    <vscale x 4 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
-  <vscale x 8 x i1>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
-    <vscale x 8 x i1> %0,
-    <vscale x 8 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
-  <vscale x 16 x i1>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
-    <vscale x 16 x i1> %0,
-    <vscale x 16 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
-  <vscale x 32 x i1>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
-    <vscale x 32 x i1> %0,
-    <vscale x 32 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
-  <vscale x 64 x i1>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
-    <vscale x 64 x i1> %0,
-    <vscale x 64 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i1> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll
similarity index 82%
rename from llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vmxor.ll
index 15cb88f17599a..4a1900ed1538f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -16,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i1> %a
 }
@@ -24,9 +26,9 @@ entry:
 declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -36,7 +38,7 @@ entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i1> %a
 }
@@ -44,9 +46,9 @@ entry:
 declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -56,7 +58,7 @@ entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i1> %a
 }
@@ -64,9 +66,9 @@ entry:
 declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -76,7 +78,7 @@ entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i1> %a
 }
@@ -84,9 +86,9 @@ entry:
 declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -96,7 +98,7 @@ entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i1> %a
 }
@@ -104,9 +106,9 @@ entry:
 declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -116,7 +118,7 @@ entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i1> %a
 }
@@ -124,9 +126,9 @@ entry:
 declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -136,7 +138,7 @@ entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i1> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll
deleted file mode 100644
index 7fd84ccf35efa..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-
-declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, i64);
-
-define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsm.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-; Make sure we can use the vsetvli from the producing instruction.
-define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vsm.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
-  ret void
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v8, v8, v9
-; CHECK-NEXT:    vsm.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll
similarity index 79%
rename from llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsm.ll
index 3285cdbfe2741..acbaa7a6d84b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll
@@ -1,105 +1,107 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 
-declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, iXLen %2)
   ret void
 }
 
-declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, i32);
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, iXLen);
 
-define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2) nounwind {
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vsm.v v0, (a0)
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2)
+  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, iXLen %2)
   ret void
 }
 
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
 ; Make sure we can use the vsetvli from the producing instruction.
-define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, iXLen %3) nounwind {
 ; CHECK-LABEL: test_vsetvli_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -110,17 +112,17 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+    iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, iXLen %3)
   ret void
 }
 
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, iXLen %3) nounwind {
 ; CHECK-LABEL: test_vsetvli_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -131,7 +133,7 @@ entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %3)
-  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+    iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, iXLen %3)
   ret void
 }
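
For readers skimming the archive, here is the full shape of one merged test in isolation, assembled from the vmxor hunk above purely as an illustration of the pattern (it is not an additional file in the patch): the VL operand is written with the placeholder type iXLen, and each RUN line pipes the file through sed to substitute i32 or i64 before invoking llc, so a single set of CHECK lines covers both riscv32 and riscv64.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}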


        

